Compare commits


2 Commits

8 changed files with 119 additions and 773 deletions

Submodule .yoyo/snapshot deleted from 5e783b71e7

crawl4ai/browser_manager.py

@@ -1,23 +1,22 @@
 import asyncio
-import hashlib
+import time
+from typing import List, Optional
 import os
-import shlex
+import sys
 import shutil
+import tempfile
+import psutil
 import signal
 import subprocess
-import sys
-import tempfile
-import time
-import warnings
-from typing import List, Optional
-
-import psutil
+import shlex
 from playwright.async_api import BrowserContext
+import hashlib
 
-from .async_configs import BrowserConfig, CrawlerRunConfig
-from .config import DOWNLOAD_PAGE_TIMEOUT
 from .js_snippet import load_js_script
+from .config import DOWNLOAD_PAGE_TIMEOUT
+from .async_configs import BrowserConfig, CrawlerRunConfig
 from .utils import get_chromium_path
+import warnings
 
 BROWSER_DISABLE_OPTIONS = [
     "--disable-background-networking",
@@ -93,25 +92,21 @@ class ManagedBrowser:
         if config.light_mode:
             flags.extend(BROWSER_DISABLE_OPTIONS)
         if config.text_mode:
-            flags.extend(
-                [
-                    "--blink-settings=imagesEnabled=false",
-                    "--disable-remote-fonts",
-                    "--disable-images",
-                    "--disable-javascript",
-                    "--disable-software-rasterizer",
-                    "--disable-dev-shm-usage",
-                ]
-            )
+            flags.extend([
+                "--blink-settings=imagesEnabled=false",
+                "--disable-remote-fonts",
+                "--disable-images",
+                "--disable-javascript",
+                "--disable-software-rasterizer",
+                "--disable-dev-shm-usage",
+            ])
         # proxy support
         if config.proxy:
             flags.append(f"--proxy-server={config.proxy}")
         elif config.proxy_config:
             creds = ""
             if config.proxy_config.username and config.proxy_config.password:
-                creds = (
-                    f"{config.proxy_config.username}:{config.proxy_config.password}@"
-                )
+                creds = f"{config.proxy_config.username}:{config.proxy_config.password}@"
             flags.append(f"--proxy-server={creds}{config.proxy_config.server}")
         # dedupe
         return list(dict.fromkeys(flags))
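Both sides of this hunk finish by deduplicating the flag list the same way. Since dict preserves insertion order in Python 3.7+, `list(dict.fromkeys(...))` drops duplicates without reordering; a minimal standalone illustration (flag values invented):

    flags = [
        "--disable-gpu",
        "--proxy-server=http://proxy:8080",  # hypothetical proxy flag
        "--disable-gpu",                     # duplicate collapses into the first occurrence
    ]
    print(list(dict.fromkeys(flags)))
    # ['--disable-gpu', '--proxy-server=http://proxy:8080']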
@@ -188,6 +183,7 @@ class ManagedBrowser:
         if self.browser_config.extra_args:
             args.extend(self.browser_config.extra_args)
 
+
         # ── make sure no old Chromium instance is owning the same port/profile ──
         try:
             if sys.platform == "win32":
@@ -204,9 +200,7 @@ class ManagedBrowser:
             else:  # macOS / Linux
                 # kill any process listening on the same debugging port
                 pids = (
-                    subprocess.check_output(
-                        shlex.split(f"lsof -t -i:{self.debugging_port}")
-                    )
+                    subprocess.check_output(shlex.split(f"lsof -t -i:{self.debugging_port}"))
                     .decode()
                     .strip()
                     .splitlines()
@@ -227,6 +221,7 @@ class ManagedBrowser:
                 # non-fatal — we'll try to start anyway, but log what happened
                 self.logger.warning(f"pre-launch cleanup failed: {_e}", tag="BROWSER")
 
+
         # Start browser process
         try:
             # Use DETACHED_PROCESS flag on Windows to fully detach the process
@@ -236,21 +231,21 @@ class ManagedBrowser:
                     args,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
-                    creationflags=subprocess.DETACHED_PROCESS
-                    | subprocess.CREATE_NEW_PROCESS_GROUP,
+                    creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
                 )
             else:
                 self.browser_process = subprocess.Popen(
                     args,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE,
-                    preexec_fn=os.setpgrp,  # Start in a new process group
+                    preexec_fn=os.setpgrp # Start in a new process group
                 )
 
             # If verbose is True print args used to run the process
             if self.logger and self.browser_config.verbose:
                 self.logger.debug(
-                    f"Starting browser with args: {' '.join(args)}", tag="BROWSER"
+                    f"Starting browser with args: {' '.join(args)}",
+                    tag="BROWSER"
                 )
 
             # We'll monitor for a short time to make sure it starts properly, but won't keep monitoring
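Both sides of this hunk launch Chromium detached from the parent; only the formatting of the call changes. A minimal standalone sketch of the same cross-platform detach idiom (the child command is a stand-in):

    import os
    import subprocess
    import sys

    cmd = [sys.executable, "-c", "import time; time.sleep(60)"]  # stand-in long-lived child
    if sys.platform == "win32":
        # Fully detach on Windows and give the child its own process group
        proc = subprocess.Popen(
            cmd,
            creationflags=subprocess.DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP,
        )
    else:
        # POSIX: new process group, so signals sent to the parent don't kill the child
        proc = subprocess.Popen(cmd, preexec_fn=os.setpgrp)
    print(f"spawned detached child pid={proc.pid}")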
@@ -412,14 +407,7 @@ class ManagedBrowser:
                 if sys.platform == "win32":
                     # On Windows we might need taskkill for detached processes
                     try:
-                        subprocess.run(
-                            [
-                                "taskkill",
-                                "/F",
-                                "/PID",
-                                str(self.browser_process.pid),
-                            ]
-                        )
+                        subprocess.run(["taskkill", "/F", "/PID", str(self.browser_process.pid)])
                     except Exception:
                         self.browser_process.kill()
                 else:
@@ -467,9 +455,7 @@ class ManagedBrowser:
         # Create a BrowserProfiler instance and delegate to it
         profiler = BrowserProfiler(logger=logger)
-        return await profiler.create_profile(
-            profile_name=profile_name, browser_config=browser_config
-        )
+        return await profiler.create_profile(profile_name=profile_name, browser_config=browser_config)
 
     @staticmethod
     def list_profiles():
@@ -569,6 +555,7 @@ async def clone_runtime_state(
     return dst
 
 
+
 class BrowserManager:
     """
     Manages the browser instance and context.
@@ -595,9 +582,7 @@ class BrowserManager:
             cls._playwright_instance = await async_playwright().start()
         return cls._playwright_instance
 
-    def __init__(
-        self, browser_config: BrowserConfig, logger=None, use_undetected: bool = False
-    ):
+    def __init__(self, browser_config: BrowserConfig, logger=None, use_undetected: bool = False):
         """
         Initialize the BrowserManager with a browser configuration.
@@ -633,7 +618,6 @@ class BrowserManager:
         self._stealth_adapter = None
         if self.config.enable_stealth and not self.use_undetected:
             from .browser_adapter import StealthAdapter
-
             self._stealth_adapter = StealthAdapter()
 
         # Initialize ManagedBrowser if needed
@@ -673,11 +657,7 @@ class BrowserManager:
         if self.config.cdp_url or self.config.use_managed_browser:
             self.config.use_managed_browser = True
-            cdp_url = (
-                await self.managed_browser.start()
-                if not self.config.cdp_url
-                else self.config.cdp_url
-            )
+            cdp_url = await self.managed_browser.start() if not self.config.cdp_url else self.config.cdp_url
             self.browser = await self.playwright.chromium.connect_over_cdp(cdp_url)
             contexts = self.browser.contexts
             if contexts:
@@ -698,6 +678,7 @@ class BrowserManager:
             self.default_context = self.browser
 
+
     def _build_browser_args(self) -> dict:
         """Build browser launch arguments from config."""
         args = [
@@ -820,9 +801,9 @@ class BrowserManager:
         context.set_default_navigation_timeout(DOWNLOAD_PAGE_TIMEOUT)
         if self.config.downloads_path:
             context._impl_obj._options["accept_downloads"] = True
-            context._impl_obj._options["downloads_path"] = (
-                self.config.downloads_path
-            )
+            context._impl_obj._options[
+                "downloads_path"
+            ] = self.config.downloads_path
 
         # Handle user agent and browser hints
         if self.config.user_agent:
@@ -945,12 +926,10 @@ class BrowserManager:
                     "server": crawlerRunConfig.proxy_config.server,
                 }
                 if crawlerRunConfig.proxy_config.username:
-                    proxy_settings.update(
-                        {
-                            "username": crawlerRunConfig.proxy_config.username,
-                            "password": crawlerRunConfig.proxy_config.password,
-                        }
-                    )
+                    proxy_settings.update({
+                        "username": crawlerRunConfig.proxy_config.username,
+                        "password": crawlerRunConfig.proxy_config.password,
+                    })
                 context_settings["proxy"] = proxy_settings
 
         if self.config.text_mode:
@@ -1008,7 +987,7 @@ class BrowserManager:
             "cache_mode",
             "content_filter",
             "semaphore_count",
-            "url",
+            "url"
         ]
 
         # Do NOT exclude locale, timezone_id, or geolocation as these DO affect browser context
@@ -1034,7 +1013,7 @@ class BrowserManager:
                 self.logger.warning(
                     message="Failed to apply stealth to page: {error}",
                     tag="STEALTH",
-                    params={"error": str(e)},
+                    params={"error": str(e)}
                 )
 
     async def get_page(self, crawlerRunConfig: CrawlerRunConfig):
@@ -1060,10 +1039,8 @@ class BrowserManager:
         if self.config.use_managed_browser:
             if self.config.storage_state:
                 context = await self.create_browser_context(crawlerRunConfig)
                 ctx = self.default_context  # default context, one window only
-                ctx = await clone_runtime_state(
-                    context, ctx, crawlerRunConfig, self.config
-                )
+                ctx = await clone_runtime_state(context, ctx, crawlerRunConfig, self.config)
             # Avoid concurrent new_page on shared persistent context
             # See GH-1198: context.pages can be empty under races
             async with self._page_lock:
@@ -1075,21 +1052,14 @@
                 page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
                 if not page:
                     if pages:
-                        # FIX: Always create a new page for managed browsers to support concurrent crawling
-                        # Previously: page = pages[0]
-                        async with self._page_lock:
-                            page = await context.new_page()
-                            await self._apply_stealth_to_page(page)
+                        page = pages[0]
                     else:
                         # Double-check under lock to avoid TOCTOU and ensure only
                         # one task calls new_page when pages=[] concurrently
                         async with self._page_lock:
                             pages = context.pages
                             if pages:
-                                # FIX: Always create a new page for managed browsers to support concurrent crawling
-                                # Previously: page = pages[0]
-                                page = await context.new_page()
-                                await self._apply_stealth_to_page(page)
+                                page = pages[0]
                             else:
                                 page = await context.new_page()
                                 await self._apply_stealth_to_page(page)
@@ -1161,7 +1131,7 @@ class BrowserManager:
                 self.logger.error(
                     message="Error closing context: {error}",
                     tag="ERROR",
-                    params={"error": str(e)},
+                    params={"error": str(e)}
                 )
 
         self.contexts_by_config.clear()
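The `@@ -1075,21 +1052,14 @@` hunk above is the one behavioral change in this file: the left-hand side always opens a fresh tab under `_page_lock`, while the right-hand side returns to handing every caller the shared `pages[0]` tab. A standalone Playwright sketch (function names and URLs invented, not crawl4ai's API) of how the two strategies differ under concurrent crawling:

    import asyncio
    from playwright.async_api import async_playwright

    async def main():
        async with async_playwright() as pw:
            browser = await pw.chromium.launch(headless=True)
            context = await browser.new_context()
            page_lock = asyncio.Lock()

            async def crawl_shared_tab(url: str) -> str:
                # pages[0]-style reuse: concurrent tasks all drive the same tab,
                # so each goto() clobbers whichever navigation is in flight.
                # (Defined only to show the contrasting pattern.)
                page = context.pages[0] if context.pages else await context.new_page()
                await page.goto(url)
                return await page.title()

            async def crawl_own_tab(url: str) -> str:
                # new_page()-under-lock style: only page creation is serialized;
                # each task then owns its tab, so navigations proceed independently.
                async with page_lock:
                    page = await context.new_page()
                try:
                    await page.goto(url)
                    return await page.title()
                finally:
                    await page.close()

            urls = ["https://example.com", "https://example.org"]
            print(await asyncio.gather(*(crawl_own_tab(u) for u in urls)))
            await browser.close()

    asyncio.run(main())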

deploy/docker/crawler_pool.py

@@ -2,8 +2,8 @@
 import asyncio, json, hashlib, time, psutil
 from contextlib import suppress
 from typing import Dict
-from crawl4ai import AsyncWebCrawler, BrowserConfig
-from typing import Dict
+from crawl4ai import AsyncWebCrawler, BrowserConfig, BrowserAdapter
+from typing import Dict ,Optional
 from utils import load_config
 
 CONFIG = load_config()
@@ -15,11 +15,22 @@ LOCK = asyncio.Lock()
 MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)  # % RAM refuse new browsers above this
 IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800)  # close if unused for 30min
 
-def _sig(cfg: BrowserConfig) -> str:
-    payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",",":"))
+def _sig(cfg: BrowserConfig, adapter: Optional[BrowserAdapter] = None) -> str:
+    try:
+        config_payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
+    except (TypeError, ValueError):
+        # Fallback to string representation if JSON serialization fails
+        config_payload = str(cfg.to_dict())
+    adapter_name = adapter.__class__.__name__ if adapter else "PlaywrightAdapter"
+    payload = f"{config_payload}:{adapter_name}"
     return hashlib.sha1(payload.encode()).hexdigest()
 
-async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
+async def get_crawler(
+    cfg: BrowserConfig, adapter: Optional[BrowserAdapter] = None
+) -> AsyncWebCrawler:
+    sig = None
     try:
         sig = _sig(cfg)
         async with LOCK:
@@ -37,12 +48,13 @@ async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
     except Exception as e:
         raise RuntimeError(f"Failed to start browser: {e}")
     finally:
-        if sig in POOL:
-            LAST_USED[sig] = time.time()
-        else:
-            # If we failed to start the browser, we should remove it from the pool
-            POOL.pop(sig, None)
-            LAST_USED.pop(sig, None)
+        if sig:
+            if sig in POOL:
+                LAST_USED[sig] = time.time()
+            else:
+                # If we failed to start the browser, we should remove it from the pool
+                POOL.pop(sig, None)
+                LAST_USED.pop(sig, None)
         # If we failed to start the browser, we should remove it from the pool
 
 async def close_all():
     async with LOCK:
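The reworked `_sig` folds the adapter's class name into the pool key, so one `BrowserConfig` driven by two different adapters no longer collides on a single pooled crawler. A standalone sketch of the keying scheme (a plain dict stands in for `cfg.to_dict()`, and both adapter classes are invented):

    import hashlib
    import json
    from typing import Optional

    class PlaywrightAdapter:      # stand-in for the default adapter
        pass

    class UndetectedAdapter:      # hypothetical alternative adapter
        pass

    def sig(config: dict, adapter: Optional[object] = None) -> str:
        try:
            payload = json.dumps(config, sort_keys=True, separators=(",", ":"))
        except (TypeError, ValueError):
            payload = str(config)  # non-JSON-serializable configs fall back to str()
        adapter_name = adapter.__class__.__name__ if adapter else "PlaywrightAdapter"
        return hashlib.sha1(f"{payload}:{adapter_name}".encode()).hexdigest()

    cfg = {"headless": True, "browser_type": "chromium"}
    assert sig(cfg) == sig(cfg, PlaywrightAdapter())   # default equals the explicit default
    assert sig(cfg) != sig(cfg, UndetectedAdapter())   # a different adapter changes the key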

pyproject.toml

@@ -31,7 +31,7 @@ dependencies = [
     "rank-bm25~=0.2",
     "snowballstemmer~=2.2",
     "pydantic>=2.10",
-    "pyOpenSSL>=25.3.0",
+    "pyOpenSSL>=24.3.0",
     "psutil>=6.1.1",
     "PyYAML>=6.0",
     "nltk>=3.9.1",

requirements.txt

@@ -19,7 +19,7 @@ rank-bm25~=0.2
 colorama~=0.4
 snowballstemmer~=2.2
 pydantic>=2.10
-pyOpenSSL>=25.3.0
+pyOpenSSL>=24.3.0
 psutil>=6.1.1
 PyYAML>=6.0
 nltk>=3.9.1
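Both dependency files lower the pyOpenSSL floor from 25.3.0 back to 24.3.0. The deleted tests further down treat cryptography >=37.0.0 and <43.0.1 as the vulnerable range from Issue #1545; a quick gate in the same spirit (assumes the packaging, pyOpenSSL, and cryptography packages are installed):

    from packaging import version

    import OpenSSL
    import cryptography

    crypto_v = version.parse(cryptography.__version__)
    # Vulnerable range cited by the deleted tests: >=37.0.0 and <43.0.1
    vulnerable = version.parse("37.0.0") <= crypto_v < version.parse("43.0.1")
    print(f"pyOpenSSL {OpenSSL.__version__}, cryptography {cryptography.__version__}")
    print("cryptography is IN the vulnerable range" if vulnerable else "cryptography is outside the vulnerable range")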


@@ -1,283 +0,0 @@
"""
Compact test suite for CDP concurrency fix.
This file consolidates all tests related to the CDP concurrency fix for
AsyncWebCrawler.arun_many() with managed browsers.
The bug was that all concurrent tasks were fighting over one shared tab,
causing failures. This has been fixed by modifying the get_page() method
in browser_manager.py to always create new pages instead of reusing pages[0].
"""
import asyncio
import shutil
import sys
import tempfile
from pathlib import Path
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from crawl4ai import AsyncWebCrawler, CacheMode, CrawlerRunConfig
from crawl4ai.async_configs import BrowserConfig
# =============================================================================
# TEST 1: Basic arun_many functionality
# =============================================================================
async def test_basic_arun_many():
"""Test that arun_many works correctly with basic configuration."""
print("=== TEST 1: Basic arun_many functionality ===")
# Configuration to bypass cache for testing
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
# Test URLs - using reliable test URLs
test_urls = [
"https://httpbin.org/html", # Simple HTML page
"https://httpbin.org/json", # Simple JSON response
]
async with AsyncWebCrawler() as crawler:
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
# This should work correctly
result = await crawler.arun_many(urls=test_urls, config=config)
# Simple verification - if we get here without exception, the basic functionality works
print(f"✓ arun_many completed successfully")
return True
# =============================================================================
# TEST 2: CDP Browser with Managed Configuration
# =============================================================================
async def test_arun_many_with_managed_cdp_browser():
"""Test that arun_many works correctly with managed CDP browsers."""
print("\n=== TEST 2: arun_many with managed CDP browser ===")
# Create a temporary user data directory for the CDP browser
user_data_dir = tempfile.mkdtemp(prefix="crawl4ai-cdp-test-")
try:
# Configure browser to use managed CDP mode
browser_config = BrowserConfig(
use_managed_browser=True,
browser_type="chromium",
headless=True,
user_data_dir=user_data_dir,
verbose=True,
)
# Configuration to bypass cache for testing
crawler_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
page_timeout=60000,
wait_until="domcontentloaded",
)
# Test URLs - using reliable test URLs
test_urls = [
"https://httpbin.org/html", # Simple HTML page
"https://httpbin.org/json", # Simple JSON response
]
# Create crawler with CDP browser configuration
async with AsyncWebCrawler(config=browser_config) as crawler:
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
# This should work correctly with our fix
result = await crawler.arun_many(urls=test_urls, config=crawler_config)
print(f"✓ arun_many completed successfully with managed CDP browser")
return True
except Exception as e:
print(f"❌ Test failed with error: {str(e)}")
raise
finally:
# Clean up temporary directory
try:
shutil.rmtree(user_data_dir, ignore_errors=True)
except:
pass
# =============================================================================
# TEST 3: Concurrency Verification
# =============================================================================
async def test_concurrent_crawling():
"""Test concurrent crawling to verify the fix works."""
print("\n=== TEST 3: Concurrent crawling verification ===")
# Configuration to bypass cache for testing
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
# Test URLs - using reliable test URLs
test_urls = [
"https://httpbin.org/html", # Simple HTML page
"https://httpbin.org/json", # Simple JSON response
"https://httpbin.org/uuid", # Simple UUID response
"https://example.com/", # Standard example page
]
async with AsyncWebCrawler() as crawler:
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
# This should work correctly with our fix
results = await crawler.arun_many(urls=test_urls, config=config)
# Simple verification - if we get here without exception, the fix works
print("✓ arun_many completed successfully with concurrent crawling")
return True
# =============================================================================
# TEST 4: Concurrency Fix Demonstration
# =============================================================================
async def test_concurrency_fix():
"""Demonstrate that the concurrency fix works."""
print("\n=== TEST 4: Concurrency fix demonstration ===")
# Configuration to bypass cache for testing
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
# Test URLs - using reliable test URLs
test_urls = [
"https://httpbin.org/html", # Simple HTML page
"https://httpbin.org/json", # Simple JSON response
"https://httpbin.org/uuid", # Simple UUID response
]
async with AsyncWebCrawler() as crawler:
print(f"Testing concurrent crawling of {len(test_urls)} URLs...")
# This should work correctly with our fix
results = await crawler.arun_many(urls=test_urls, config=config)
# Simple verification - if we get here without exception, the fix works
print("✓ arun_many completed successfully with concurrent crawling")
return True
# =============================================================================
# TEST 5: Before/After Behavior Comparison
# =============================================================================
async def test_before_after_behavior():
"""Test that demonstrates concurrent crawling works correctly after the fix."""
print("\n=== TEST 5: Before/After behavior test ===")
# Configuration to bypass cache for testing
config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
# Test URLs - using reliable test URLs that would stress the concurrency system
test_urls = [
"https://httpbin.org/delay/1", # Delayed response to increase chance of contention
"https://httpbin.org/delay/2", # Delayed response to increase chance of contention
"https://httpbin.org/uuid", # Fast response
"https://httpbin.org/json", # Fast response
]
async with AsyncWebCrawler() as crawler:
print(
f"Testing concurrent crawling of {len(test_urls)} URLs (including delayed responses)..."
)
print(
"This test would have failed before the concurrency fix due to page contention."
)
# This should work correctly with our fix
results = await crawler.arun_many(urls=test_urls, config=config)
# Simple verification - if we get here without exception, the fix works
print("✓ arun_many completed successfully with concurrent crawling")
print("✓ No page contention issues detected")
return True
# =============================================================================
# TEST 6: Reference Pattern Test
# =============================================================================
async def test_reference_pattern():
"""Main test function following reference pattern."""
print("\n=== TEST 6: Reference pattern test ===")
# Configure crawler settings
crawler_cfg = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS,
page_timeout=60000,
wait_until="domcontentloaded",
)
# Define URLs to crawl
URLS = [
"https://httpbin.org/html",
"https://httpbin.org/json",
"https://httpbin.org/uuid",
]
# Crawl all URLs using arun_many
async with AsyncWebCrawler() as crawler:
print(f"Testing concurrent crawling of {len(URLS)} URLs...")
results = await crawler.arun_many(urls=URLS, config=crawler_cfg)
# Simple verification - if we get here without exception, the fix works
print("✓ arun_many completed successfully with concurrent crawling")
print("✅ Reference pattern test completed successfully!")
# =============================================================================
# MAIN EXECUTION
# =============================================================================
async def main():
"""Run all tests."""
print("Running compact CDP concurrency test suite...")
print("=" * 60)
tests = [
test_basic_arun_many,
test_arun_many_with_managed_cdp_browser,
test_concurrent_crawling,
test_concurrency_fix,
test_before_after_behavior,
test_reference_pattern,
]
passed = 0
failed = 0
for test_func in tests:
try:
await test_func()
passed += 1
except Exception as e:
print(f"❌ Test failed: {str(e)}")
failed += 1
print("\n" + "=" * 60)
print(f"Test Results: {passed} passed, {failed} failed")
if failed == 0:
print("🎉 All tests passed! The CDP concurrency fix is working correctly.")
return True
else:
print(f"{failed} test(s) failed!")
return False
if __name__ == "__main__":
success = asyncio.run(main())
sys.exit(0 if success else 1)


@@ -1,168 +0,0 @@
"""
Lightweight test to verify pyOpenSSL security fix (Issue #1545).
This test verifies the security requirements are met:
1. pyOpenSSL >= 25.3.0 is installed
2. cryptography >= 45.0.7 is installed (above vulnerable range)
3. SSL/TLS functionality works correctly
This test can run without full crawl4ai dependencies installed.
"""
import sys
from packaging import version
def test_package_versions():
"""Test that package versions meet security requirements."""
print("=" * 70)
print("TEST: Package Version Security Requirements (Issue #1545)")
print("=" * 70)
all_passed = True
# Test pyOpenSSL version
try:
import OpenSSL
pyopenssl_version = OpenSSL.__version__
print(f"\n✓ pyOpenSSL is installed: {pyopenssl_version}")
if version.parse(pyopenssl_version) >= version.parse("25.3.0"):
print(f" ✓ PASS: pyOpenSSL {pyopenssl_version} >= 25.3.0 (required)")
else:
print(f" ✗ FAIL: pyOpenSSL {pyopenssl_version} < 25.3.0 (required)")
all_passed = False
except ImportError as e:
print(f"\n✗ FAIL: pyOpenSSL not installed - {e}")
all_passed = False
# Test cryptography version
try:
import cryptography
crypto_version = cryptography.__version__
print(f"\n✓ cryptography is installed: {crypto_version}")
# The vulnerable range is >=37.0.0 & <43.0.1
# We need >= 45.0.7 to be safe
if version.parse(crypto_version) >= version.parse("45.0.7"):
print(f" ✓ PASS: cryptography {crypto_version} >= 45.0.7 (secure)")
print(f" ✓ NOT in vulnerable range (37.0.0 to 43.0.0)")
elif version.parse(crypto_version) >= version.parse("37.0.0") and version.parse(crypto_version) < version.parse("43.0.1"):
print(f" ✗ FAIL: cryptography {crypto_version} is VULNERABLE")
print(f" ✗ Version is in vulnerable range (>=37.0.0 & <43.0.1)")
all_passed = False
else:
print(f" ⚠ WARNING: cryptography {crypto_version} < 45.0.7")
print(f" ⚠ May not meet security requirements")
except ImportError as e:
print(f"\n✗ FAIL: cryptography not installed - {e}")
all_passed = False
return all_passed
def test_ssl_basic_functionality():
"""Test that SSL/TLS basic functionality works."""
print("\n" + "=" * 70)
print("TEST: SSL/TLS Basic Functionality")
print("=" * 70)
try:
import OpenSSL.SSL
# Create a basic SSL context to verify functionality
context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
print("\n✓ SSL Context created successfully")
print(" ✓ PASS: SSL/TLS functionality is working")
return True
except Exception as e:
print(f"\n✗ FAIL: SSL functionality test failed - {e}")
return False
def test_pyopenssl_crypto_integration():
"""Test that pyOpenSSL and cryptography integration works."""
print("\n" + "=" * 70)
print("TEST: pyOpenSSL <-> cryptography Integration")
print("=" * 70)
try:
from OpenSSL import crypto
# Generate a simple key pair to test integration
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
print("\n✓ Generated RSA key pair successfully")
print(" ✓ PASS: pyOpenSSL and cryptography are properly integrated")
return True
except Exception as e:
print(f"\n✗ FAIL: Integration test failed - {e}")
import traceback
traceback.print_exc()
return False
def main():
"""Run all security tests."""
print("\n")
print("" + "=" * 68 + "")
print("║ pyOpenSSL Security Fix Verification - Issue #1545 ║")
print("" + "=" * 68 + "")
print("\nVerifying that the pyOpenSSL update resolves the security vulnerability")
print("in the cryptography package (CVE: versions >=37.0.0 & <43.0.1)\n")
results = []
# Test 1: Package versions
results.append(("Package Versions", test_package_versions()))
# Test 2: SSL functionality
results.append(("SSL Functionality", test_ssl_basic_functionality()))
# Test 3: Integration
results.append(("pyOpenSSL-crypto Integration", test_pyopenssl_crypto_integration()))
# Summary
print("\n" + "=" * 70)
print("TEST SUMMARY")
print("=" * 70)
all_passed = True
for test_name, passed in results:
status = "✓ PASS" if passed else "✗ FAIL"
print(f"{status}: {test_name}")
all_passed = all_passed and passed
print("=" * 70)
if all_passed:
print("\n✓✓✓ ALL TESTS PASSED ✓✓✓")
print("✓ Security vulnerability is resolved")
print("✓ pyOpenSSL >= 25.3.0 is working correctly")
print("✓ cryptography >= 45.0.7 (not vulnerable)")
print("\nThe dependency update is safe to merge.\n")
return True
else:
print("\n✗✗✗ SOME TESTS FAILED ✗✗✗")
print("✗ Security requirements not met")
print("\nDo NOT merge until all tests pass.\n")
return False
if __name__ == "__main__":
try:
success = main()
sys.exit(0 if success else 1)
except KeyboardInterrupt:
print("\n\nTest interrupted by user")
sys.exit(1)
except Exception as e:
print(f"\n✗ Unexpected error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)


@@ -1,184 +0,0 @@
"""
Test script to verify pyOpenSSL update doesn't break crawl4ai functionality.
This test verifies:
1. pyOpenSSL and cryptography versions are correct and secure
2. Basic crawling functionality still works
3. HTTPS/SSL connections work properly
4. Stealth mode integration works (uses playwright-stealth internally)
Issue: #1545 - Security vulnerability in cryptography package
Fix: Updated pyOpenSSL from >=24.3.0 to >=25.3.0
Expected: cryptography package should be >=45.0.7 (above vulnerable range)
"""
import asyncio
import sys
from packaging import version
def check_versions():
"""Verify pyOpenSSL and cryptography versions meet security requirements."""
print("=" * 60)
print("STEP 1: Checking Package Versions")
print("=" * 60)
try:
import OpenSSL
pyopenssl_version = OpenSSL.__version__
print(f"✓ pyOpenSSL version: {pyopenssl_version}")
# Check pyOpenSSL >= 25.3.0
if version.parse(pyopenssl_version) >= version.parse("25.3.0"):
print(f" ✓ Version check passed: {pyopenssl_version} >= 25.3.0")
else:
print(f" ✗ Version check FAILED: {pyopenssl_version} < 25.3.0")
return False
except ImportError as e:
print(f"✗ Failed to import pyOpenSSL: {e}")
return False
try:
import cryptography
crypto_version = cryptography.__version__
print(f"✓ cryptography version: {crypto_version}")
# Check cryptography >= 45.0.7 (above vulnerable range)
if version.parse(crypto_version) >= version.parse("45.0.7"):
print(f" ✓ Security check passed: {crypto_version} >= 45.0.7 (not vulnerable)")
else:
print(f" ✗ Security check FAILED: {crypto_version} < 45.0.7 (potentially vulnerable)")
return False
except ImportError as e:
print(f"✗ Failed to import cryptography: {e}")
return False
print("\n✓ All version checks passed!\n")
return True
async def test_basic_crawl():
"""Test basic crawling functionality with HTTPS site."""
print("=" * 60)
print("STEP 2: Testing Basic HTTPS Crawling")
print("=" * 60)
try:
from crawl4ai import AsyncWebCrawler
async with AsyncWebCrawler(verbose=True) as crawler:
# Test with a simple HTTPS site (requires SSL/TLS)
print("Crawling example.com (HTTPS)...")
result = await crawler.arun(
url="https://www.example.com",
bypass_cache=True
)
if result.success:
print(f"✓ Crawl successful!")
print(f" - Status code: {result.status_code}")
print(f" - Content length: {len(result.html)} bytes")
print(f" - SSL/TLS connection: ✓ Working")
return True
else:
print(f"✗ Crawl failed: {result.error_message}")
return False
except Exception as e:
print(f"✗ Test failed with error: {e}")
import traceback
traceback.print_exc()
return False
async def test_stealth_mode():
"""Test stealth mode functionality (depends on playwright-stealth)."""
print("\n" + "=" * 60)
print("STEP 3: Testing Stealth Mode Integration")
print("=" * 60)
try:
from crawl4ai import AsyncWebCrawler, BrowserConfig
# Create browser config with stealth mode
browser_config = BrowserConfig(
headless=True,
verbose=False
)
async with AsyncWebCrawler(config=browser_config, verbose=True) as crawler:
print("Crawling with stealth mode enabled...")
result = await crawler.arun(
url="https://www.example.com",
bypass_cache=True
)
if result.success:
print(f"✓ Stealth crawl successful!")
print(f" - Stealth mode: ✓ Working")
return True
else:
print(f"✗ Stealth crawl failed: {result.error_message}")
return False
except Exception as e:
print(f"✗ Stealth test failed with error: {e}")
import traceback
traceback.print_exc()
return False
async def main():
"""Run all tests."""
print("\n")
print("" + "=" * 58 + "")
print("║ pyOpenSSL Security Update Verification Test (Issue #1545) ║")
print("" + "=" * 58 + "")
print("\n")
# Step 1: Check versions
versions_ok = check_versions()
if not versions_ok:
print("\n✗ FAILED: Version requirements not met")
return False
# Step 2: Test basic crawling
crawl_ok = await test_basic_crawl()
if not crawl_ok:
print("\n✗ FAILED: Basic crawling test failed")
return False
# Step 3: Test stealth mode
stealth_ok = await test_stealth_mode()
if not stealth_ok:
print("\n✗ FAILED: Stealth mode test failed")
return False
# All tests passed
print("\n" + "=" * 60)
print("FINAL RESULT")
print("=" * 60)
print("✓ All tests passed successfully!")
print("✓ pyOpenSSL update is working correctly")
print("✓ No breaking changes detected")
print("✓ Security vulnerability resolved")
print("=" * 60)
print("\n")
return True
if __name__ == "__main__":
try:
success = asyncio.run(main())
sys.exit(0 if success else 1)
except KeyboardInterrupt:
print("\n\nTest interrupted by user")
sys.exit(1)
except Exception as e:
print(f"\n✗ Unexpected error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)