Version 0.7.8 Release Highlights - Stability & Bug Fix Release
diff --git a/crawl4ai/__init__.py b/crawl4ai/__init__.py
index 8f1fdef4..af35e6a0 100644
--- a/crawl4ai/__init__.py
+++ b/crawl4ai/__init__.py
@@ -72,6 +72,8 @@ from .deep_crawling import (
BestFirstCrawlingStrategy,
DFSDeepCrawlStrategy,
DeepCrawlDecorator,
+ ContentRelevanceFilter,
+ ContentTypeScorer,
)
# NEW: Import AsyncUrlSeeder
from .async_url_seeder import AsyncUrlSeeder
diff --git a/crawl4ai/__version__.py b/crawl4ai/__version__.py
index e70e91c0..68aa7125 100644
--- a/crawl4ai/__version__.py
+++ b/crawl4ai/__version__.py
@@ -1,7 +1,7 @@
# crawl4ai/__version__.py
# This is the version that will be used for stable releases
-__version__ = "0.7.7"
+__version__ = "0.7.8"
# For nightly builds, this gets set during build process
__nightly_version__ = None
diff --git a/crawl4ai/adaptive_crawler.py b/crawl4ai/adaptive_crawler.py
index bce1da23..b7c649b0 100644
--- a/crawl4ai/adaptive_crawler.py
+++ b/crawl4ai/adaptive_crawler.py
@@ -728,18 +728,18 @@ class EmbeddingStrategy(CrawlStrategy):
provider = llm_config_dict.get('provider', 'openai/gpt-4o-mini') if llm_config_dict else 'openai/gpt-4o-mini'
api_token = llm_config_dict.get('api_token') if llm_config_dict else None
- # response = perform_completion_with_backoff(
- # provider=provider,
- # prompt_with_variables=prompt,
- # api_token=api_token,
- # json_response=True
- # )
+ response = perform_completion_with_backoff(
+ provider=provider,
+ prompt_with_variables=prompt,
+ api_token=api_token,
+ json_response=True
+ )
- # variations = json.loads(response.choices[0].message.content)
+ variations = json.loads(response.choices[0].message.content)
# # Mock data with more variations for split
- variations ={'queries': ['what are the best vegetables to use in fried rice?', 'how do I make vegetable fried rice from scratch?', 'can you provide a quick recipe for vegetable fried rice?', 'what cooking techniques are essential for perfect fried rice with vegetables?', 'how to add flavor to vegetable fried rice?', 'are there any tips for making healthy fried rice with vegetables?']}
+ # variations ={'queries': ['what are the best vegetables to use in fried rice?', 'how do I make vegetable fried rice from scratch?', 'can you provide a quick recipe for vegetable fried rice?', 'what cooking techniques are essential for perfect fried rice with vegetables?', 'how to add flavor to vegetable fried rice?', 'are there any tips for making healthy fried rice with vegetables?']}
# variations = {'queries': [
diff --git a/crawl4ai/async_configs.py b/crawl4ai/async_configs.py
index bfa0d398..10cc48d0 100644
--- a/crawl4ai/async_configs.py
+++ b/crawl4ai/async_configs.py
@@ -1,5 +1,5 @@
+import importlib
import os
-from typing import Union
import warnings
import requests
from .config import (
@@ -27,14 +27,14 @@ from .table_extraction import TableExtractionStrategy, DefaultTableExtraction
from .cache_context import CacheMode
from .proxy_strategy import ProxyRotationStrategy
-from typing import Union, List, Callable
import inspect
-from typing import Any, Dict, Optional
+from typing import Any, Callable, Dict, List, Optional, Union
from enum import Enum
# Type alias for URL matching
UrlMatcher = Union[str, Callable[[str], bool], List[Union[str, Callable[[str], bool]]]]
+
class MatchMode(Enum):
OR = "or"
AND = "and"
@@ -42,8 +42,7 @@ class MatchMode(Enum):
# from .proxy_strategy import ProxyConfig
-
-def to_serializable_dict(obj: Any, ignore_default_value : bool = False) -> Dict:
+def to_serializable_dict(obj: Any, ignore_default_value : bool = False):
"""
Recursively convert an object to a serializable dictionary using {type, params} structure
for complex objects.
@@ -110,8 +109,6 @@ def to_serializable_dict(obj: Any, ignore_default_value : bool = False) -> Dict:
# if value is not None:
# current_values[attr_name] = to_serializable_dict(value)
-
-
return {
"type": obj.__class__.__name__,
"params": current_values
@@ -137,12 +134,20 @@ def from_serializable_dict(data: Any) -> Any:
if data["type"] == "dict" and "value" in data:
return {k: from_serializable_dict(v) for k, v in data["value"].items()}
- # Import from crawl4ai for class instances
- import crawl4ai
-
- if hasattr(crawl4ai, data["type"]):
- cls = getattr(crawl4ai, data["type"])
+ cls = None
+ # If you are receiving an error while trying to convert a dict to an object:
+ # Either add a module to `modules_paths` list, or add the `data["type"]` to the crawl4ai __init__.py file
+ module_paths = ["crawl4ai"]
+ for module_path in module_paths:
+ try:
+ mod = importlib.import_module(module_path)
+ if hasattr(mod, data["type"]):
+ cls = getattr(mod, data["type"])
+ break
+ except (ImportError, AttributeError):
+ continue
+ if cls is not None:
# Handle Enum
if issubclass(cls, Enum):
return cls(data["params"])
@@ -598,7 +603,7 @@ class BrowserConfig:
"chrome_channel": self.chrome_channel,
"channel": self.channel,
"proxy": self.proxy,
- "proxy_config": self.proxy_config,
+ "proxy_config": self.proxy_config.to_dict() if self.proxy_config else None,
"viewport_width": self.viewport_width,
"viewport_height": self.viewport_height,
"accept_downloads": self.accept_downloads,
@@ -1792,7 +1797,10 @@ class LLMConfig:
frequency_penalty: Optional[float] = None,
presence_penalty: Optional[float] = None,
stop: Optional[List[str]] = None,
- n: Optional[int] = None,
+ n: Optional[int] = None,
+ backoff_base_delay: Optional[int] = None,
+ backoff_max_attempts: Optional[int] = None,
+ backoff_exponential_factor: Optional[int] = None,
):
"""Configuaration class for LLM provider and API token."""
self.provider = provider
@@ -1821,6 +1829,9 @@ class LLMConfig:
self.presence_penalty = presence_penalty
self.stop = stop
self.n = n
+ self.backoff_base_delay = backoff_base_delay if backoff_base_delay is not None else 2
+ self.backoff_max_attempts = backoff_max_attempts if backoff_max_attempts is not None else 3
+ self.backoff_exponential_factor = backoff_exponential_factor if backoff_exponential_factor is not None else 2
@staticmethod
def from_kwargs(kwargs: dict) -> "LLMConfig":
@@ -1834,7 +1845,10 @@ class LLMConfig:
frequency_penalty=kwargs.get("frequency_penalty"),
presence_penalty=kwargs.get("presence_penalty"),
stop=kwargs.get("stop"),
- n=kwargs.get("n")
+ n=kwargs.get("n"),
+ backoff_base_delay=kwargs.get("backoff_base_delay"),
+ backoff_max_attempts=kwargs.get("backoff_max_attempts"),
+ backoff_exponential_factor=kwargs.get("backoff_exponential_factor")
)
def to_dict(self):
@@ -1848,7 +1862,10 @@ class LLMConfig:
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"stop": self.stop,
- "n": self.n
+ "n": self.n,
+ "backoff_base_delay": self.backoff_base_delay,
+ "backoff_max_attempts": self.backoff_max_attempts,
+ "backoff_exponential_factor": self.backoff_exponential_factor
}
def clone(self, **kwargs):
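
The dynamic-import change above is what lets the Docker server rebuild filter objects from JSON payloads. A minimal round-trip sketch, assuming the `to_serializable_dict`/`from_serializable_dict` helpers behave as shown in the hunks above:

```python
# Hypothetical round-trip: serialize a filter, then rebuild it by class name.
from crawl4ai import ContentRelevanceFilter
from crawl4ai.async_configs import to_serializable_dict, from_serializable_dict

flt = ContentRelevanceFilter(query="API documentation", threshold=0.3)
payload = to_serializable_dict(flt)
# -> {"type": "ContentRelevanceFilter", "params": {...}}

# Before this release the lookup failed because ContentRelevanceFilter was not
# exported from the package root; the importlib loop now resolves it by name.
restored = from_serializable_dict(payload)
assert isinstance(restored, ContentRelevanceFilter)
```
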
diff --git a/crawl4ai/async_crawler_strategy.py b/crawl4ai/async_crawler_strategy.py
index 76977bb9..2850b36a 100644
--- a/crawl4ai/async_crawler_strategy.py
+++ b/crawl4ai/async_crawler_strategy.py
@@ -1023,6 +1023,12 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy):
final_messages = await self.adapter.retrieve_console_messages(page)
captured_console.extend(final_messages)
+ ###
+ # This ensures we capture the current page URL at the time we return the response,
+ # which correctly reflects any JavaScript navigation that occurred.
+ ###
+ redirected_url = page.url # Use current page URL to capture JS redirects
+
# Return complete response
return AsyncCrawlResponse(
html=html,
diff --git a/crawl4ai/async_webcrawler.py b/crawl4ai/async_webcrawler.py
index 1b571b50..4dc52adc 100644
--- a/crawl4ai/async_webcrawler.py
+++ b/crawl4ai/async_webcrawler.py
@@ -617,17 +617,17 @@ class AsyncWebCrawler:
else config.chunking_strategy
)
sections = chunking.chunk(content)
- # extracted_content = config.extraction_strategy.run(url, sections)
+ # extracted_content = config.extraction_strategy.run(_url, sections)
# Use async version if available for better parallelism
if hasattr(config.extraction_strategy, 'arun'):
- extracted_content = await config.extraction_strategy.arun(url, sections)
+ extracted_content = await config.extraction_strategy.arun(_url, sections)
else:
# Fallback to sync version run in thread pool to avoid blocking
extracted_content = await asyncio.to_thread(
config.extraction_strategy.run, url, sections
)
-
+
extracted_content = json.dumps(
extracted_content, indent=4, default=str, ensure_ascii=False
)
diff --git a/crawl4ai/content_filter_strategy.py b/crawl4ai/content_filter_strategy.py
index 1e764f74..50baed27 100644
--- a/crawl4ai/content_filter_strategy.py
+++ b/crawl4ai/content_filter_strategy.py
@@ -980,6 +980,9 @@ class LLMContentFilter(RelevantContentFilter):
prompt,
api_token,
base_url=base_url,
+ base_delay=self.llm_config.backoff_base_delay,
+ max_attempts=self.llm_config.backoff_max_attempts,
+ exponential_factor=self.llm_config.backoff_exponential_factor,
extra_args=extra_args,
)
diff --git a/crawl4ai/content_scraping_strategy.py b/crawl4ai/content_scraping_strategy.py
index d9095e49..e915ff5b 100644
--- a/crawl4ai/content_scraping_strategy.py
+++ b/crawl4ai/content_scraping_strategy.py
@@ -542,6 +542,19 @@ class LXMLWebScrapingStrategy(ContentScrapingStrategy):
if el.tag in bypass_tags:
continue
+            # Skip elements inside <pre> or <code> tags where whitespace is significant
+            # This preserves whitespace-only spans (e.g., <span> </span>) in code blocks
+ is_in_code_block = False
+ ancestor = el.getparent()
+ while ancestor is not None:
+ if ancestor.tag in ("pre", "code"):
+ is_in_code_block = True
+ break
+ ancestor = ancestor.getparent()
+
+ if is_in_code_block:
+ continue
+
text_content = (el.text_content() or "").strip()
if (
len(text_content.split()) < word_count_threshold
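
The ancestor walk in this hunk is easy to exercise in isolation. A self-contained sketch of the same logic with plain lxml (the helper name is illustrative, not part of the library API):

```python
# Sketch of the new check: is an element nested inside <pre>/<code>?
from lxml import html

def in_code_block(el) -> bool:
    ancestor = el.getparent()
    while ancestor is not None:
        if ancestor.tag in ("pre", "code"):
            return True
        ancestor = ancestor.getparent()
    return False

tree = html.fromstring("<div><pre><code><span> </span>x = 1</code></pre></div>")
span = tree.xpath("//span")[0]
assert in_code_block(span)  # whitespace-only span is no longer pruned
```
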
diff --git a/crawl4ai/deep_crawling/filters.py b/crawl4ai/deep_crawling/filters.py
index 981cbcd8..c075cb7d 100644
--- a/crawl4ai/deep_crawling/filters.py
+++ b/crawl4ai/deep_crawling/filters.py
@@ -509,18 +509,22 @@ class DomainFilter(URLFilter):
class ContentRelevanceFilter(URLFilter):
"""BM25-based relevance filter using head section content"""
- __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")
+ __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl", "query")
def __init__(
self,
- query: str,
+ query: Union[str, List[str]],
threshold: float,
k1: float = 1.2,
b: float = 0.75,
avgdl: int = 1000,
):
super().__init__(name="BM25RelevanceFilter")
- self.query_terms = self._tokenize(query)
+ if isinstance(query, list):
+ self.query = " ".join(query)
+ else:
+ self.query = query
+ self.query_terms = self._tokenize(self.query)
self.threshold = threshold
self.k1 = k1 # TF saturation parameter
self.b = b # Length normalization parameter
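
With this change, a query list and its space-joined string produce the same BM25 terms. A quick sketch, assuming `_tokenize` is deterministic as the diff suggests:

```python
from crawl4ai import ContentRelevanceFilter

# Both forms are tokenized identically after the list is joined with spaces.
f1 = ContentRelevanceFilter(query="api documentation", threshold=0.3)
f2 = ContentRelevanceFilter(query=["api", "documentation"], threshold=0.3)
assert f1.query_terms == f2.query_terms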
diff --git a/crawl4ai/docker_client.py b/crawl4ai/docker_client.py
index 969fee7c..6624cf07 100644
--- a/crawl4ai/docker_client.py
+++ b/crawl4ai/docker_client.py
@@ -180,7 +180,7 @@ class Crawl4aiDockerClient:
yield CrawlResult(**result)
return stream_results()
- response = await self._request("POST", "/crawl", json=data)
+ response = await self._request("POST", "/crawl", json=data, timeout=hooks_timeout)
result_data = response.json()
if not result_data.get("success", False):
raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")
diff --git a/crawl4ai/extraction_strategy.py b/crawl4ai/extraction_strategy.py
index 4a64e5d4..7033e380 100644
--- a/crawl4ai/extraction_strategy.py
+++ b/crawl4ai/extraction_strategy.py
@@ -649,6 +649,9 @@ class LLMExtractionStrategy(ExtractionStrategy):
base_url=self.llm_config.base_url,
json_response=self.force_json_response,
extra_args=self.extra_args,
+ base_delay=self.llm_config.backoff_base_delay,
+ max_attempts=self.llm_config.backoff_max_attempts,
+ exponential_factor=self.llm_config.backoff_exponential_factor
) # , json_response=self.extract_type == "schema")
# Track usage
usage = TokenUsage(
@@ -846,6 +849,9 @@ class LLMExtractionStrategy(ExtractionStrategy):
base_url=self.llm_config.base_url,
json_response=self.force_json_response,
extra_args=self.extra_args,
+ base_delay=self.llm_config.backoff_base_delay,
+ max_attempts=self.llm_config.backoff_max_attempts,
+ exponential_factor=self.llm_config.backoff_exponential_factor
)
# Track usage
usage = TokenUsage(
diff --git a/crawl4ai/models.py b/crawl4ai/models.py
index 63e39885..e46bb7fa 100644
--- a/crawl4ai/models.py
+++ b/crawl4ai/models.py
@@ -1,4 +1,4 @@
-from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
+from pydantic import BaseModel, HttpUrl, PrivateAttr, Field, ConfigDict
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
from typing import AsyncGenerator
from typing import Generic, TypeVar
@@ -153,8 +153,7 @@ class CrawlResult(BaseModel):
console_messages: Optional[List[Dict[str, Any]]] = None
tables: List[Dict] = Field(default_factory=list)  # NEW – [{headers,rows,caption,summary}]
- class Config:
- arbitrary_types_allowed = True
+ model_config = ConfigDict(arbitrary_types_allowed=True)
# NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
# and model_dump override all exist to support a smooth transition from markdown as a string
@@ -332,8 +331,7 @@ class AsyncCrawlResponse(BaseModel):
network_requests: Optional[List[Dict[str, Any]]] = None
console_messages: Optional[List[Dict[str, Any]]] = None
- class Config:
- arbitrary_types_allowed = True
+ model_config = ConfigDict(arbitrary_types_allowed=True)
###############################
# Scraping Models
diff --git a/crawl4ai/processors/pdf/processor.py b/crawl4ai/processors/pdf/processor.py
index 2888eef1..1af25c05 100644
--- a/crawl4ai/processors/pdf/processor.py
+++ b/crawl4ai/processors/pdf/processor.py
@@ -15,9 +15,9 @@ from .utils import (
clean_pdf_text_to_html,
)
-# Remove direct PyPDF2 imports from the top
-# import PyPDF2
-# from PyPDF2 import PdfReader
+# Remove direct pypdf imports from the top
+# import pypdf
+# from pypdf import PdfReader
logger = logging.getLogger(__name__)
@@ -59,9 +59,9 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
# Import check at initialization time
try:
- import PyPDF2
+ import pypdf
except ImportError:
- raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
self.image_dpi = image_dpi
self.image_quality = image_quality
@@ -75,9 +75,9 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
def process(self, pdf_path: Path) -> PDFProcessResult:
# Import inside method to allow dependency to be optional
try:
- from PyPDF2 import PdfReader
+ from pypdf import PdfReader
except ImportError:
- raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
start_time = time()
result = PDFProcessResult(
@@ -125,15 +125,15 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
"""Like process() but processes PDF pages in parallel batches"""
# Import inside method to allow dependency to be optional
try:
- from PyPDF2 import PdfReader
- import PyPDF2 # For type checking
+ from pypdf import PdfReader
+ import pypdf # For type checking
except ImportError:
- raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
import concurrent.futures
import threading
- # Initialize PyPDF2 thread support
+ # Initialize pypdf thread support
if not hasattr(threading.current_thread(), "_children"):
threading.current_thread()._children = set()
@@ -232,11 +232,11 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
return pdf_page
def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
- # Import PyPDF2 for type checking only when needed
+ # Import pypdf for type checking only when needed
try:
- import PyPDF2
+ from pypdf.generic import IndirectObject
except ImportError:
- raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
if not self.extract_images:
return []
@@ -266,7 +266,7 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
width = xobj.get('/Width', 0)
height = xobj.get('/Height', 0)
color_space = xobj.get('/ColorSpace', '/DeviceRGB')
- if isinstance(color_space, PyPDF2.generic.IndirectObject):
+ if isinstance(color_space, IndirectObject):
color_space = color_space.get_object()
# Handle different image encodings
@@ -277,7 +277,7 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
if '/FlateDecode' in filters:
try:
decode_parms = xobj.get('/DecodeParms', {})
- if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
+ if isinstance(decode_parms, IndirectObject):
decode_parms = decode_parms.get_object()
predictor = decode_parms.get('/Predictor', 1)
@@ -416,10 +416,10 @@ class NaivePDFProcessorStrategy(PDFProcessorStrategy):
# Import inside method to allow dependency to be optional
if reader is None:
try:
- from PyPDF2 import PdfReader
+ from pypdf import PdfReader
reader = PdfReader(pdf_path)
except ImportError:
- raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ raise ImportError("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
meta = reader.metadata or {}
created = self._parse_pdf_date(meta.get('/CreationDate', ''))
@@ -459,11 +459,11 @@ if __name__ == "__main__":
from pathlib import Path
try:
- # Import PyPDF2 only when running the file directly
- import PyPDF2
- from PyPDF2 import PdfReader
+ # Import pypdf only when running the file directly
+ import pypdf
+ from pypdf import PdfReader
except ImportError:
- print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
+ print("pypdf is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
exit(1)
current_dir = Path(__file__).resolve().parent
diff --git a/crawl4ai/table_extraction.py b/crawl4ai/table_extraction.py
index b2f1992b..7edb3b76 100644
--- a/crawl4ai/table_extraction.py
+++ b/crawl4ai/table_extraction.py
@@ -795,6 +795,9 @@ Return only a JSON array of extracted tables following the specified format."""
api_token=self.llm_config.api_token,
base_url=self.llm_config.base_url,
json_response=True,
+ base_delay=self.llm_config.backoff_base_delay,
+ max_attempts=self.llm_config.backoff_max_attempts,
+ exponential_factor=self.llm_config.backoff_exponential_factor,
extra_args=self.extra_args
)
@@ -1116,6 +1119,9 @@ Return only a JSON array of extracted tables following the specified format."""
api_token=self.llm_config.api_token,
base_url=self.llm_config.base_url,
json_response=True,
+ base_delay=self.llm_config.backoff_base_delay,
+ max_attempts=self.llm_config.backoff_max_attempts,
+ exponential_factor=self.llm_config.backoff_exponential_factor,
extra_args=self.extra_args
)
diff --git a/crawl4ai/utils.py b/crawl4ai/utils.py
index 68a343fb..74216095 100644
--- a/crawl4ai/utils.py
+++ b/crawl4ai/utils.py
@@ -1745,6 +1745,9 @@ def perform_completion_with_backoff(
api_token,
json_response=False,
base_url=None,
+ base_delay=2,
+ max_attempts=3,
+ exponential_factor=2,
**kwargs,
):
"""
@@ -1761,6 +1764,9 @@ def perform_completion_with_backoff(
api_token (str): The API token for authentication.
json_response (bool): Whether to request a JSON response. Defaults to False.
base_url (Optional[str]): The base URL for the API. Defaults to None.
+ base_delay (int): The base delay in seconds. Defaults to 2.
+ max_attempts (int): The maximum number of attempts. Defaults to 3.
+ exponential_factor (int): The exponential factor. Defaults to 2.
**kwargs: Additional arguments for the API request.
Returns:
@@ -1770,9 +1776,6 @@ def perform_completion_with_backoff(
from litellm import completion
from litellm.exceptions import RateLimitError
- max_attempts = 3
- base_delay = 2 # Base delay in seconds, you can adjust this based on your needs
-
extra_args = {"temperature": 0.01, "api_key": api_token, "base_url": base_url}
if json_response:
extra_args["response_format"] = {"type": "json_object"}
@@ -1798,7 +1801,7 @@ def perform_completion_with_backoff(
# Check if we have exhausted our max attempts
if attempt < max_attempts - 1:
# Calculate the delay and wait
- delay = base_delay * (2**attempt) # Exponential backoff formula
+ delay = base_delay * (exponential_factor**attempt) # Exponential backoff formula
print(f"Waiting for {delay} seconds before retrying...")
time.sleep(delay)
else:
@@ -1831,6 +1834,9 @@ async def aperform_completion_with_backoff(
api_token,
json_response=False,
base_url=None,
+ base_delay=2,
+ max_attempts=3,
+ exponential_factor=2,
**kwargs,
):
"""
@@ -1847,6 +1853,9 @@ async def aperform_completion_with_backoff(
api_token (str): The API token for authentication.
json_response (bool): Whether to request a JSON response. Defaults to False.
base_url (Optional[str]): The base URL for the API. Defaults to None.
+ base_delay (int): The base delay in seconds. Defaults to 2.
+ max_attempts (int): The maximum number of attempts. Defaults to 3.
+ exponential_factor (int): The exponential factor. Defaults to 2.
**kwargs: Additional arguments for the API request.
Returns:
@@ -1857,9 +1866,6 @@ async def aperform_completion_with_backoff(
from litellm.exceptions import RateLimitError
import asyncio
- max_attempts = 3
- base_delay = 2 # Base delay in seconds, you can adjust this based on your needs
-
extra_args = {"temperature": 0.01, "api_key": api_token, "base_url": base_url}
if json_response:
extra_args["response_format"] = {"type": "json_object"}
@@ -1885,7 +1891,7 @@ async def aperform_completion_with_backoff(
# Check if we have exhausted our max attempts
if attempt < max_attempts - 1:
# Calculate the delay and wait
- delay = base_delay * (2**attempt) # Exponential backoff formula
+ delay = base_delay * (exponential_factor**attempt) # Exponential backoff formula
print(f"Waiting for {delay} seconds before retrying...")
await asyncio.sleep(delay)
else:
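
For reference, the delay schedule implied by the formula in this hunk (a worked example, not library code):

```python
# delay = base_delay * exponential_factor**attempt, applied after each failed
# attempt except the last (attempt = 0 .. max_attempts - 2).
base_delay, exponential_factor, max_attempts = 5, 3, 5
delays = [base_delay * exponential_factor**attempt for attempt in range(max_attempts - 1)]
print(delays)  # [5, 15, 45, 135] seconds of waiting between the 5 attempts
```
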
diff --git a/deploy/docker/api.py b/deploy/docker/api.py
index 4fab27b1..81cd312a 100644
--- a/deploy/docker/api.py
+++ b/deploy/docker/api.py
@@ -108,7 +108,10 @@ async def handle_llm_qa(
prompt_with_variables=prompt,
api_token=get_llm_api_key(config), # Returns None to let litellm handle it
temperature=get_llm_temperature(config),
- base_url=get_llm_base_url(config)
+ base_url=get_llm_base_url(config),
+ base_delay=config["llm"].get("backoff_base_delay", 2),
+ max_attempts=config["llm"].get("backoff_max_attempts", 3),
+ exponential_factor=config["llm"].get("backoff_exponential_factor", 2)
)
return response.choices[0].message.content
diff --git a/docs/blog/release-v0.7.8.md b/docs/blog/release-v0.7.8.md
new file mode 100644
index 00000000..8dbe99c0
--- /dev/null
+++ b/docs/blog/release-v0.7.8.md
@@ -0,0 +1,327 @@
+# Crawl4AI v0.7.8: Stability & Bug Fix Release
+
+*December 2025*
+
+---
+
+I'm releasing Crawl4AI v0.7.8, a focused stability release that addresses 11 bugs reported by the community. While there are no new features in this release, these fixes resolve important issues affecting Docker deployments, LLM extraction, URL handling, and dependency compatibility.
+
+## What's Fixed at a Glance
+
+- **Docker API**: Fixed ContentRelevanceFilter deserialization, ProxyConfig serialization, and cache folder permissions
+- **LLM Extraction**: Configurable rate limiter backoff, HTML input format support, and proper URL handling for raw HTML
+- **URL Handling**: Correct relative URL resolution after JavaScript redirects
+- **Dependencies**: Replaced deprecated PyPDF2 with pypdf, Pydantic v2 ConfigDict compatibility
+- **AdaptiveCrawler**: Fixed query expansion to actually use LLM instead of hardcoded mock data
+
+## Bug Fixes
+
+### Docker & API Fixes
+
+#### ContentRelevanceFilter Deserialization (#1642)
+
+**The Problem:** When sending deep crawl requests to the Docker API with `ContentRelevanceFilter`, the server failed to deserialize the filter, causing requests to fail.
+
+**The Fix:** I added `ContentRelevanceFilter` to the public exports and enhanced the deserialization logic with dynamic imports.
+
+```python
+# This now works correctly in Docker API
+import httpx
+
+request = {
+ "urls": ["https://docs.example.com"],
+ "crawler_config": {
+ "deep_crawl_strategy": {
+ "type": "BFSDeepCrawlStrategy",
+ "max_depth": 2,
+ "filter_chain": [
+ {
+ "type": "ContentRelevanceFilter",
+ "query": "API documentation",
+ "threshold": 0.3
+ }
+ ]
+ }
+ }
+}
+
+async with httpx.AsyncClient() as client:
+ response = await client.post("http://localhost:11235/crawl", json=request)
+ # Previously failed, now works!
+```
+
+#### ProxyConfig JSON Serialization (#1629)
+
+**The Problem:** `BrowserConfig.to_dict()` failed when `proxy_config` was set because `ProxyConfig` wasn't being serialized to a dictionary.
+
+**The Fix:** `ProxyConfig.to_dict()` is now called during serialization.
+
+```python
+from crawl4ai import BrowserConfig
+from crawl4ai.async_configs import ProxyConfig
+
+proxy = ProxyConfig(
+ server="http://proxy.example.com:8080",
+ username="user",
+ password="pass"
+)
+
+config = BrowserConfig(headless=True, proxy_config=proxy)
+
+# Previously raised TypeError, now works
+config_dict = config.to_dict()
+json.dumps(config_dict) # Valid JSON
+```
+
+#### Docker Cache Folder Permissions (#1638)
+
+**The Problem:** The `.cache` folder in the Docker image had incorrect permissions, causing crawling to fail when caching was enabled.
+
+**The Fix:** Corrected ownership and permissions during image build.
+
+```bash
+# Cache now works correctly in Docker
+docker run -d -p 11235:11235 \
+ --shm-size=1g \
+ -v ./my-cache:/app/.cache \
+ unclecode/crawl4ai:0.7.8
+```
+
+---
+
+### LLM & Extraction Fixes
+
+#### Configurable Rate Limiter Backoff (#1269)
+
+**The Problem:** The LLM rate limiting backoff parameters were hardcoded, making it impossible to adjust retry behavior for different API rate limits.
+
+**The Fix:** `LLMConfig` now accepts three new parameters for complete control over retry behavior.
+
+```python
+from crawl4ai import LLMConfig
+
+# Default behavior (unchanged)
+default_config = LLMConfig(provider="openai/gpt-4o-mini")
+# backoff_base_delay=2, backoff_max_attempts=3, backoff_exponential_factor=2
+
+# Custom configuration for APIs with strict rate limits
+custom_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ backoff_base_delay=5, # Wait 5 seconds on first retry
+ backoff_max_attempts=5, # Try up to 5 times
+ backoff_exponential_factor=3 # Multiply delay by 3 each attempt
+)
+
+# Retry waits between attempts: 5s -> 15s -> 45s -> 135s
+```
+
+#### LLM Strategy HTML Input Support (#1178)
+
+**The Problem:** `LLMExtractionStrategy` always sent markdown to the LLM, but some extraction tasks work better with HTML structure preserved.
+
+**The Fix:** Added `input_format` parameter supporting `"markdown"`, `"html"`, `"fit_markdown"`, `"cleaned_html"`, and `"fit_html"`.
+
+```python
+from crawl4ai import LLMExtractionStrategy, LLMConfig
+
+# Default: markdown input (unchanged)
+markdown_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Extract product information"
+)
+
+# NEW: HTML input - preserves table/list structure
+html_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Extract the data table preserving structure",
+ input_format="html"
+)
+
+# NEW: Filtered markdown - only relevant content
+fit_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Summarize the main content",
+ input_format="fit_markdown"
+)
+```
+
+#### Raw HTML URL Variable (#1116)
+
+**The Problem:** When using `url="raw:..."`, the entire HTML content was being passed to extraction strategies as the URL parameter, polluting LLM prompts.
+
+**The Fix:** The URL is now correctly set to `"Raw HTML"` for raw HTML inputs.
+
+```python
+from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
+
+html = "Test "
+
+async with AsyncWebCrawler() as crawler:
+ result = await crawler.arun(
+ url=f"raw:{html}",
+ config=CrawlerRunConfig(extraction_strategy=my_strategy)
+ )
+ # extraction_strategy receives url="Raw HTML" instead of the HTML blob
+```
+
+---
+
+### URL Handling Fix
+
+#### Relative URLs After Redirects (#1268)
+
+**The Problem:** When JavaScript caused a page redirect, relative links were resolved against the original URL instead of the final URL.
+
+**The Fix:** `redirected_url` now captures the actual page URL after all JavaScript execution completes.
+
+```python
+from crawl4ai import AsyncWebCrawler
+
+async with AsyncWebCrawler() as crawler:
+ # Page at /old-page redirects via JS to /new-page
+ result = await crawler.arun(url="https://example.com/old-page")
+
+ # BEFORE: redirected_url = "https://example.com/old-page"
+ # AFTER: redirected_url = "https://example.com/new-page"
+
+ # Links are now correctly resolved against the final URL
+ for link in result.links['internal']:
+ print(link['href']) # Relative links resolved correctly
+```
+
+---
+
+### Dependency & Compatibility Fixes
+
+#### PyPDF2 Replaced with pypdf (#1412)
+
+**The Problem:** PyPDF2 was deprecated in 2022 and is no longer maintained.
+
+**The Fix:** Replaced with the actively maintained `pypdf` library.
+
+```bash
+# Installation (unchanged)
+pip install crawl4ai[pdf]
+
+# The PDF processor now uses pypdf internally
+# No code changes required - API remains the same
+```
+
+#### Pydantic v2 ConfigDict Compatibility (#678)
+
+**The Problem:** Using the deprecated `class Config` syntax caused deprecation warnings with Pydantic v2.
+
+**The Fix:** Migrated to `model_config = ConfigDict(...)` syntax.
+
+```python
+# No more deprecation warnings when importing crawl4ai models
+from crawl4ai.models import CrawlResult
+from crawl4ai import CrawlerRunConfig, BrowserConfig
+
+# All models are now Pydantic v2 compatible
+```
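+
+The pattern applied in `crawl4ai/models.py`, shown on a toy model:
+
+```python
+from pydantic import BaseModel, ConfigDict
+
+class Example(BaseModel):
+    # Pydantic v2 style; replaces the deprecated `class Config:` block
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+```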
+
+---
+
+### AdaptiveCrawler Fix
+
+#### Query Expansion Using LLM (#1621)
+
+**The Problem:** The `EmbeddingStrategy` in AdaptiveCrawler had commented-out LLM code and was using hardcoded mock query variations instead.
+
+**The Fix:** Uncommented and activated the LLM call for actual query expansion.
+
+```python
+# AdaptiveCrawler query expansion now actually uses the LLM
+# Instead of hardcoded variations like:
+# variations = {'queries': ['what are the best vegetables...']}
+
+# The LLM generates relevant query variations based on your actual query
+```
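+
+A minimal sketch of where this fix lands, following the adaptive crawling docs (treat the exact config keys as illustrative):
+
+```python
+import os
+from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
+
+config = AdaptiveConfig(
+    strategy="embedding",  # EmbeddingStrategy is the one that expands queries
+    embedding_llm_config={
+        "provider": "openai/gpt-4o-mini",
+        "api_token": os.getenv("OPENAI_API_KEY"),
+    },
+)
+
+async with AsyncWebCrawler() as crawler:
+    adaptive = AdaptiveCrawler(crawler, config)
+    # The LLM now generates the query variations at this step
+    result = await adaptive.digest(
+        start_url="https://docs.example.com",
+        query="vegetable fried rice recipe",
+    )
+```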
+
+---
+
+### Code Formatting Fix
+
+#### Import Statement Formatting (#1181)
+
+**The Problem:** When extracting code from web pages, import statements were sometimes concatenated without proper line separation.
+
+**The Fix:** Import statements now maintain proper newline separation.
+
+```python
+# BEFORE: "import osimport sysfrom pathlib import Path"
+# AFTER:
+# import os
+# import sys
+# from pathlib import Path
+```
+
+---
+
+## Breaking Changes
+
+**None!** This release is fully backward compatible.
+
+- All existing code continues to work without modification
+- New parameters have sensible defaults matching previous behavior
+- No API changes to existing functionality
+
+---
+
+## Upgrade Instructions
+
+### Python Package
+
+```bash
+pip install --upgrade crawl4ai
+# or
+pip install crawl4ai==0.7.8
+```
+
+### Docker
+
+```bash
+# Pull the latest version
+docker pull unclecode/crawl4ai:0.7.8
+
+# Run
+docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.8
+```
+
+---
+
+## Verification
+
+Run the verification tests to confirm all fixes are working:
+
+```bash
+python docs/releases_review/demo_v0.7.8.py
+```
+
+This runs actual tests that verify each bug fix is properly implemented.
+
+---
+
+## Acknowledgments
+
+Thank you to everyone who reported these issues and provided detailed reproduction steps. Your bug reports make Crawl4AI better for everyone.
+
+Issues fixed: #1642, #1638, #1629, #1621, #1412, #1269, #1268, #1181, #1178, #1116, #678
+
+---
+
+## Support & Resources
+
+- **Documentation**: [docs.crawl4ai.com](https://docs.crawl4ai.com)
+- **GitHub**: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
+- **Discord**: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
+- **Twitter**: [@unclecode](https://x.com/unclecode)
+
+---
+
+**This stability release ensures Crawl4AI works reliably across Docker deployments, LLM extraction workflows, and various edge cases. Thank you for your continued support and feedback!**
+
+**Happy crawling!**
+
+*- unclecode*
diff --git a/docs/md_v2/api/parameters.md b/docs/md_v2/api/parameters.md
index 41984ba5..9d907516 100644
--- a/docs/md_v2/api/parameters.md
+++ b/docs/md_v2/api/parameters.md
@@ -439,10 +439,19 @@ LLMConfig is useful to pass LLM provider config to strategies and functions that
| **`provider`** | `"ollama/llama3","groq/llama3-70b-8192","groq/llama3-8b-8192", "openai/gpt-4o-mini" ,"openai/gpt-4o","openai/o1-mini","openai/o1-preview","openai/o3-mini","openai/o3-mini-high","anthropic/claude-3-haiku-20240307","anthropic/claude-3-opus-20240229","anthropic/claude-3-sonnet-20240229","anthropic/claude-3-5-sonnet-20240620","gemini/gemini-pro","gemini/gemini-1.5-pro","gemini/gemini-2.0-flash","gemini/gemini-2.0-flash-exp","gemini/gemini-2.0-flash-lite-preview-02-05","deepseek/deepseek-chat"` *(default: `"openai/gpt-4o-mini"`)* | Which LLM provider to use.
| **`api_token`** |1.Optional. When not provided explicitly, api_token will be read from environment variables based on provider. For example: If a gemini model is passed as provider then,`"GEMINI_API_KEY"` will be read from environment variables 2. API token of LLM provider eg: `api_token = "gsk_1ClHGGJ7Lpn4WGybR7vNWGdyb3FY7zXEw3SCiy0BAVM9lL8CQv"` 3. Environment variable - use with prefix "env:" eg:`api_token = "env: GROQ_API_KEY"` | API token to use for the given provider
| **`base_url`** |Optional. Custom API endpoint | If your provider has a custom endpoint
+| **`backoff_base_delay`** |Optional. `int` *(default: `2`)* | Seconds to wait before the first retry when the provider throttles a request.
+| **`backoff_max_attempts`** |Optional. `int` *(default: `3`)* | Total tries (initial call + retries) before surfacing an error.
+| **`backoff_exponential_factor`** |Optional. `int` *(default: `2`)* | Multiplier that increases the wait time for each retry (`delay = base_delay * factor^attempt`).
## 3.2 Example Usage
```python
-llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY"))
+llm_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ api_token=os.getenv("OPENAI_API_KEY"),
+ backoff_base_delay=1, # optional
+ backoff_max_attempts=5, # optional
+ backoff_exponential_factor=3, # optional
+)
```
## 4. Putting It All Together
diff --git a/docs/md_v2/blog/index.md b/docs/md_v2/blog/index.md
index af738cb8..ec4ed527 100644
--- a/docs/md_v2/blog/index.md
+++ b/docs/md_v2/blog/index.md
@@ -20,25 +20,35 @@ Ever wondered why your AI coding assistant struggles with your library despite c
## Latest Release
+### [Crawl4AI v0.7.8 – Stability & Bug Fix Release](../blog/release-v0.7.8.md)
+*December 2025*
+
+Crawl4AI v0.7.8 is a focused stability release addressing 11 bugs reported by the community. While there are no new features, these fixes resolve important issues affecting Docker deployments, LLM extraction, URL handling, and dependency compatibility.
+
+Key highlights:
+- **Docker API Fixes**: ContentRelevanceFilter deserialization, ProxyConfig serialization, cache folder permissions
+- **LLM Improvements**: Configurable rate limiter backoff, HTML input format support, raw HTML URL handling
+- **URL Handling**: Correct relative URL resolution after JavaScript redirects
+- **Dependencies**: Replaced deprecated PyPDF2 with pypdf, Pydantic v2 ConfigDict compatibility
+- **AdaptiveCrawler**: Fixed query expansion to actually use LLM instead of mock data
+
+[Read full release notes →](../blog/release-v0.7.8.md)
+
+## Recent Releases
+
### [Crawl4AI v0.7.7 – The Self-Hosting & Monitoring Update](../blog/release-v0.7.7.md)
*November 14, 2025*
-Crawl4AI v0.7.7 transforms Docker into a complete self-hosting platform with enterprise-grade real-time monitoring, comprehensive observability, and full operational control. Experience complete visibility into your crawling infrastructure!
+Crawl4AI v0.7.7 transforms Docker into a complete self-hosting platform with enterprise-grade real-time monitoring, comprehensive observability, and full operational control.
Key highlights:
-- **Real-time Monitoring Dashboard**: Interactive web UI with live system metrics and browser pool visibility
-- **Comprehensive Monitor API**: Complete REST API for programmatic access to all monitoring data
-- **WebSocket Streaming**: Real-time updates every 2 seconds for custom dashboards
-- **Smart Browser Pool**: 3-tier architecture (permanent/hot/cold) with automatic promotion and cleanup
-- **Janitor System**: Automatic resource management with event logging
-- **Control Actions**: Manual browser management (kill, restart, cleanup) via API
-- **Production Ready**: Prometheus integration, alerting patterns, and 6 critical metrics for ops excellence
-- **Critical Fixes**: Async LLM extraction (#1055), DFS crawling (#1607), viewport config, and security updates
+- **Real-time Monitoring Dashboard**: Interactive web UI with live system metrics
+- **Comprehensive Monitor API**: Complete REST API for programmatic access
+- **WebSocket Streaming**: Real-time updates every 2 seconds
+- **Smart Browser Pool**: 3-tier architecture with automatic promotion and cleanup
[Read full release notes →](../blog/release-v0.7.7.md)
-## Recent Releases
-
### [Crawl4AI v0.7.6 โ The Webhook Infrastructure Update](../blog/release-v0.7.6.md)
*October 22, 2025*
@@ -66,15 +76,17 @@ Key highlights:
[Read full release notes →](../blog/release-v0.7.5.md)
-### [Crawl4AI v0.7.4 โ The Intelligent Table Extraction & Performance Update](../blog/release-v0.7.4.md)
-*August 17, 2025*
-
-Revolutionary LLM-powered table extraction with intelligent chunking, performance improvements for concurrent crawling, enhanced browser management, and critical stability fixes.
-
-[Read full release notes →](../blog/release-v0.7.4.md)
-
---
+## Older Releases
+
+| Version | Date | Highlights |
+|---------|------|------------|
+| [v0.7.4](../blog/release-v0.7.4.md) | August 2025 | LLM-powered table extraction, performance improvements |
+| [v0.7.3](../blog/release-v0.7.3.md) | July 2025 | Undetected browser, multi-URL config, memory monitoring |
+| [v0.7.1](../blog/release-v0.7.1.md) | June 2025 | Bug fixes and stability improvements |
+| [v0.7.0](../blog/release-v0.7.0.md) | May 2025 | Adaptive crawling, virtual scroll, link analysis |
+
## Project History
Curious about how Crawl4AI has evolved? Check out our [complete changelog](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md) for a detailed history of all versions and updates.
diff --git a/docs/md_v2/blog/releases/v0.7.8.md b/docs/md_v2/blog/releases/v0.7.8.md
new file mode 100644
index 00000000..8dbe99c0
--- /dev/null
+++ b/docs/md_v2/blog/releases/v0.7.8.md
@@ -0,0 +1,327 @@
+# Crawl4AI v0.7.8: Stability & Bug Fix Release
+
+*December 2025*
+
+---
+
+I'm releasing Crawl4AI v0.7.8, a focused stability release that addresses 11 bugs reported by the community. While there are no new features in this release, these fixes resolve important issues affecting Docker deployments, LLM extraction, URL handling, and dependency compatibility.
+
+## What's Fixed at a Glance
+
+- **Docker API**: Fixed ContentRelevanceFilter deserialization, ProxyConfig serialization, and cache folder permissions
+- **LLM Extraction**: Configurable rate limiter backoff, HTML input format support, and proper URL handling for raw HTML
+- **URL Handling**: Correct relative URL resolution after JavaScript redirects
+- **Dependencies**: Replaced deprecated PyPDF2 with pypdf, Pydantic v2 ConfigDict compatibility
+- **AdaptiveCrawler**: Fixed query expansion to actually use LLM instead of hardcoded mock data
+
+## Bug Fixes
+
+### Docker & API Fixes
+
+#### ContentRelevanceFilter Deserialization (#1642)
+
+**The Problem:** When sending deep crawl requests to the Docker API with `ContentRelevanceFilter`, the server failed to deserialize the filter, causing requests to fail.
+
+**The Fix:** I added `ContentRelevanceFilter` to the public exports and enhanced the deserialization logic with dynamic imports.
+
+```python
+# This now works correctly in Docker API
+import httpx
+
+request = {
+ "urls": ["https://docs.example.com"],
+ "crawler_config": {
+ "deep_crawl_strategy": {
+ "type": "BFSDeepCrawlStrategy",
+ "max_depth": 2,
+ "filter_chain": [
+ {
+ "type": "ContentRelevanceFilter",
+ "query": "API documentation",
+ "threshold": 0.3
+ }
+ ]
+ }
+ }
+}
+
+async with httpx.AsyncClient() as client:
+ response = await client.post("http://localhost:11235/crawl", json=request)
+ # Previously failed, now works!
+```
+
+#### ProxyConfig JSON Serialization (#1629)
+
+**The Problem:** `BrowserConfig.to_dict()` failed when `proxy_config` was set because `ProxyConfig` wasn't being serialized to a dictionary.
+
+**The Fix:** `ProxyConfig.to_dict()` is now called during serialization.
+
+```python
+from crawl4ai import BrowserConfig
+from crawl4ai.async_configs import ProxyConfig
+
+proxy = ProxyConfig(
+ server="http://proxy.example.com:8080",
+ username="user",
+ password="pass"
+)
+
+config = BrowserConfig(headless=True, proxy_config=proxy)
+
+# Previously raised TypeError, now works
+config_dict = config.to_dict()
+json.dumps(config_dict) # Valid JSON
+```
+
+#### Docker Cache Folder Permissions (#1638)
+
+**The Problem:** The `.cache` folder in the Docker image had incorrect permissions, causing crawling to fail when caching was enabled.
+
+**The Fix:** Corrected ownership and permissions during image build.
+
+```bash
+# Cache now works correctly in Docker
+docker run -d -p 11235:11235 \
+ --shm-size=1g \
+ -v ./my-cache:/app/.cache \
+ unclecode/crawl4ai:0.7.8
+```
+
+---
+
+### LLM & Extraction Fixes
+
+#### Configurable Rate Limiter Backoff (#1269)
+
+**The Problem:** The LLM rate limiting backoff parameters were hardcoded, making it impossible to adjust retry behavior for different API rate limits.
+
+**The Fix:** `LLMConfig` now accepts three new parameters for complete control over retry behavior.
+
+```python
+from crawl4ai import LLMConfig
+
+# Default behavior (unchanged)
+default_config = LLMConfig(provider="openai/gpt-4o-mini")
+# backoff_base_delay=2, backoff_max_attempts=3, backoff_exponential_factor=2
+
+# Custom configuration for APIs with strict rate limits
+custom_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ backoff_base_delay=5, # Wait 5 seconds on first retry
+ backoff_max_attempts=5, # Try up to 5 times
+ backoff_exponential_factor=3 # Multiply delay by 3 each attempt
+)
+
+# Retry waits between attempts: 5s -> 15s -> 45s -> 135s
+```
+
+#### LLM Strategy HTML Input Support (#1178)
+
+**The Problem:** `LLMExtractionStrategy` always sent markdown to the LLM, but some extraction tasks work better with HTML structure preserved.
+
+**The Fix:** Added `input_format` parameter supporting `"markdown"`, `"html"`, `"fit_markdown"`, `"cleaned_html"`, and `"fit_html"`.
+
+```python
+from crawl4ai import LLMExtractionStrategy, LLMConfig
+
+# Default: markdown input (unchanged)
+markdown_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Extract product information"
+)
+
+# NEW: HTML input - preserves table/list structure
+html_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Extract the data table preserving structure",
+ input_format="html"
+)
+
+# NEW: Filtered markdown - only relevant content
+fit_strategy = LLMExtractionStrategy(
+ llm_config=LLMConfig(provider="openai/gpt-4o-mini"),
+ instruction="Summarize the main content",
+ input_format="fit_markdown"
+)
+```
+
+#### Raw HTML URL Variable (#1116)
+
+**The Problem:** When using `url="raw:..."`, the entire HTML content was being passed to extraction strategies as the URL parameter, polluting LLM prompts.
+
+**The Fix:** The URL is now correctly set to `"Raw HTML"` for raw HTML inputs.
+
+```python
+from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
+
+html = "Test "
+
+async with AsyncWebCrawler() as crawler:
+ result = await crawler.arun(
+ url=f"raw:{html}",
+ config=CrawlerRunConfig(extraction_strategy=my_strategy)
+ )
+ # extraction_strategy receives url="Raw HTML" instead of the HTML blob
+```
+
+---
+
+### URL Handling Fix
+
+#### Relative URLs After Redirects (#1268)
+
+**The Problem:** When JavaScript caused a page redirect, relative links were resolved against the original URL instead of the final URL.
+
+**The Fix:** `redirected_url` now captures the actual page URL after all JavaScript execution completes.
+
+```python
+from crawl4ai import AsyncWebCrawler
+
+async with AsyncWebCrawler() as crawler:
+ # Page at /old-page redirects via JS to /new-page
+ result = await crawler.arun(url="https://example.com/old-page")
+
+ # BEFORE: redirected_url = "https://example.com/old-page"
+ # AFTER: redirected_url = "https://example.com/new-page"
+
+ # Links are now correctly resolved against the final URL
+ for link in result.links['internal']:
+ print(link['href']) # Relative links resolved correctly
+```
+
+---
+
+### Dependency & Compatibility Fixes
+
+#### PyPDF2 Replaced with pypdf (#1412)
+
+**The Problem:** PyPDF2 was deprecated in 2022 and is no longer maintained.
+
+**The Fix:** Replaced with the actively maintained `pypdf` library.
+
+```bash
+# Installation (unchanged)
+pip install crawl4ai[pdf]
+
+# The PDF processor now uses pypdf internally
+# No code changes required - API remains the same
+```
+
+#### Pydantic v2 ConfigDict Compatibility (#678)
+
+**The Problem:** Using the deprecated `class Config` syntax caused deprecation warnings with Pydantic v2.
+
+**The Fix:** Migrated to `model_config = ConfigDict(...)` syntax.
+
+```python
+# No more deprecation warnings when importing crawl4ai models
+from crawl4ai.models import CrawlResult
+from crawl4ai import CrawlerRunConfig, BrowserConfig
+
+# All models are now Pydantic v2 compatible
+```
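+
+The pattern applied in `crawl4ai/models.py`, shown on a toy model:
+
+```python
+from pydantic import BaseModel, ConfigDict
+
+class Example(BaseModel):
+    # Pydantic v2 style; replaces the deprecated `class Config:` block
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+```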
+
+---
+
+### AdaptiveCrawler Fix
+
+#### Query Expansion Using LLM (#1621)
+
+**The Problem:** The `EmbeddingStrategy` in AdaptiveCrawler had commented-out LLM code and was using hardcoded mock query variations instead.
+
+**The Fix:** Uncommented and activated the LLM call for actual query expansion.
+
+```python
+# AdaptiveCrawler query expansion now actually uses the LLM
+# Instead of hardcoded variations like:
+# variations = {'queries': ['what are the best vegetables...']}
+
+# The LLM generates relevant query variations based on your actual query
+```
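+
+A minimal sketch of where this fix lands, following the adaptive crawling docs (treat the exact config keys as illustrative):
+
+```python
+import os
+from crawl4ai import AsyncWebCrawler, AdaptiveCrawler, AdaptiveConfig
+
+config = AdaptiveConfig(
+    strategy="embedding",  # EmbeddingStrategy is the one that expands queries
+    embedding_llm_config={
+        "provider": "openai/gpt-4o-mini",
+        "api_token": os.getenv("OPENAI_API_KEY"),
+    },
+)
+
+async with AsyncWebCrawler() as crawler:
+    adaptive = AdaptiveCrawler(crawler, config)
+    # The LLM now generates the query variations at this step
+    result = await adaptive.digest(
+        start_url="https://docs.example.com",
+        query="vegetable fried rice recipe",
+    )
+```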
+
+---
+
+### Code Formatting Fix
+
+#### Import Statement Formatting (#1181)
+
+**The Problem:** When extracting code from web pages, import statements were sometimes concatenated without proper line separation.
+
+**The Fix:** Import statements now maintain proper newline separation.
+
+```python
+# BEFORE: "import osimport sysfrom pathlib import Path"
+# AFTER:
+# import os
+# import sys
+# from pathlib import Path
+```
+
+---
+
+## Breaking Changes
+
+**None!** This release is fully backward compatible.
+
+- All existing code continues to work without modification
+- New parameters have sensible defaults matching previous behavior
+- No API changes to existing functionality
+
+---
+
+## Upgrade Instructions
+
+### Python Package
+
+```bash
+pip install --upgrade crawl4ai
+# or
+pip install crawl4ai==0.7.8
+```
+
+### Docker
+
+```bash
+# Pull the latest version
+docker pull unclecode/crawl4ai:0.7.8
+
+# Run
+docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.8
+```
+
+---
+
+## Verification
+
+Run the verification tests to confirm all fixes are working:
+
+```bash
+python docs/releases_review/demo_v0.7.8.py
+```
+
+This runs actual tests that verify each bug fix is properly implemented.
+
+---
+
+## Acknowledgments
+
+Thank you to everyone who reported these issues and provided detailed reproduction steps. Your bug reports make Crawl4AI better for everyone.
+
+Issues fixed: #1642, #1638, #1629, #1621, #1412, #1269, #1268, #1181, #1178, #1116, #678
+
+---
+
+## Support & Resources
+
+- **Documentation**: [docs.crawl4ai.com](https://docs.crawl4ai.com)
+- **GitHub**: [github.com/unclecode/crawl4ai](https://github.com/unclecode/crawl4ai)
+- **Discord**: [discord.gg/crawl4ai](https://discord.gg/jP8KfhDhyN)
+- **Twitter**: [@unclecode](https://x.com/unclecode)
+
+---
+
+**This stability release ensures Crawl4AI works reliably across Docker deployments, LLM extraction workflows, and various edge cases. Thank you for your continued support and feedback!**
+
+**Happy crawling!**
+
+*- unclecode*
diff --git a/docs/md_v2/complete-sdk-reference.md b/docs/md_v2/complete-sdk-reference.md
index d4a5ba65..7e6abf5c 100644
--- a/docs/md_v2/complete-sdk-reference.md
+++ b/docs/md_v2/complete-sdk-reference.md
@@ -1593,8 +1593,20 @@ The `clone()` method:
- Environment variable - use with prefix "env:" eg:`api_token = "env: GROQ_API_KEY"`
3. **`base_url`**:
- If your provider has a custom endpoint
+
+4. **Backoff controls** *(optional)*:
+   - `backoff_base_delay` *(default `2` seconds)* – how long to pause before the first retry if the provider rate-limits you.
+   - `backoff_max_attempts` *(default `3`)* – total tries for the same prompt (initial call + retries).
+   - `backoff_exponential_factor` *(default `2`)* – how quickly the pause grows between retries. A factor of 2 yields waits like 2s → 4s → 8s.
+   - Because these plug into Crawl4AI's retry helper, every LLM strategy automatically follows the pacing you define here.
```python
-llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY"))
+llm_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ api_token=os.getenv("OPENAI_API_KEY"),
+ backoff_base_delay=1, # optional
+ backoff_max_attempts=5, # optional
+ backoff_exponential_factor=3, # optional
+)
```
## 4. Putting It All Together
In a typical scenario, you define **one** `BrowserConfig` for your crawler session, then create **one or more** `CrawlerRunConfig` & `LLMConfig` depending on each call's needs:
diff --git a/docs/md_v2/core/browser-crawler-config.md b/docs/md_v2/core/browser-crawler-config.md
index 5bee2368..a0e59fd0 100644
--- a/docs/md_v2/core/browser-crawler-config.md
+++ b/docs/md_v2/core/browser-crawler-config.md
@@ -308,8 +308,20 @@ The `clone()` method:
3. **`base_url`**:
- If your provider has a custom endpoint
+4. **Retry/backoff controls** *(optional)*:
+   - `backoff_base_delay` *(default `2` seconds)* – base delay inserted before the first retry when the provider returns a rate-limit response.
+   - `backoff_max_attempts` *(default `3`)* – total number of attempts (initial call plus retries) before the request is surfaced as an error.
+   - `backoff_exponential_factor` *(default `2`)* – growth rate for the retry delay (`delay = base_delay * factor^attempt`).
+   - These values are forwarded to the shared `perform_completion_with_backoff` helper, ensuring every strategy that consumes your `LLMConfig` honors the same throttling policy.
+
```python
-llm_config = LLMConfig(provider="openai/gpt-4o-mini", api_token=os.getenv("OPENAI_API_KEY"))
+llm_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ api_token=os.getenv("OPENAI_API_KEY"),
+ backoff_base_delay=1, # optional
+ backoff_max_attempts=5, # optional
+    backoff_exponential_factor=3, # optional
+)
```
## 4. Putting It All Together
diff --git a/docs/md_v2/index.md b/docs/md_v2/index.md
index d9455cbb..0229aff5 100644
--- a/docs/md_v2/index.md
+++ b/docs/md_v2/index.md
@@ -55,6 +55,16 @@
+---
+#### Crawl4AI Cloud API – Closed Beta (Launching Soon)
+Reliable, large-scale web extraction, now built to be _**drastically more cost-effective**_ than any of the existing solutions.
+
+**Apply [here](https://forms.gle/E9MyPaNXACnAMaqG7) for early access**
+_We'll be onboarding in phases and working closely with early users.
+Limited slots._
+
+---
+
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for large language models, AI agents, and data pipelines. Fully open source, flexible, and built for real-time performance, **Crawl4AI** empowers developers with unmatched speed, precision, and deployment ease.
> Enjoy using Crawl4AI? Consider **[becoming a sponsor](https://github.com/sponsors/unclecode)** to support ongoing development and community growth!
diff --git a/docs/releases_review/demo_v0.7.8.py b/docs/releases_review/demo_v0.7.8.py
new file mode 100644
index 00000000..5fb75210
--- /dev/null
+++ b/docs/releases_review/demo_v0.7.8.py
@@ -0,0 +1,910 @@
+#!/usr/bin/env python3
+"""
+Crawl4AI v0.7.8 Release Demo - Verification Tests
+==================================================
+
+This demo ACTUALLY RUNS and VERIFIES the bug fixes in v0.7.8.
+Each test executes real code and validates the fix is working.
+
+Bug Fixes Verified:
+1. ProxyConfig JSON serialization (#1629)
+2. Configurable backoff parameters (#1269)
+3. LLM Strategy input_format support (#1178)
+4. Raw HTML URL variable (#1116)
+5. Relative URLs after redirects (#1268)
+6. pypdf migration (#1412)
+7. Pydantic v2 ConfigDict (#678)
+8. Docker ContentRelevanceFilter (#1642) - requires Docker
+9. Docker .cache permissions (#1638) - requires Docker
+10. AdaptiveCrawler query expansion (#1621) - requires LLM API key
+11. Import statement formatting (#1181)
+
+Usage:
+ python docs/releases_review/demo_v0.7.8.py
+
+For Docker tests:
+ docker run -d -p 11235:11235 --shm-size=1g unclecode/crawl4ai:0.7.8
+ python docs/releases_review/demo_v0.7.8.py
+"""
+
+import asyncio
+import json
+import sys
+import warnings
+import os
+import tempfile
+from typing import Tuple, Optional
+from dataclasses import dataclass
+
+# Test results tracking
+@dataclass
+class TestResult:
+ name: str
+ issue: str
+ passed: bool
+ message: str
+ skipped: bool = False
+
+
+results: list[TestResult] = []
+
+
+def print_header(title: str):
+ print(f"\n{'=' * 70}")
+ print(f"{title}")
+ print(f"{'=' * 70}")
+
+
+def print_test(name: str, issue: str):
+ print(f"\n[TEST] {name} ({issue})")
+ print("-" * 50)
+
+
+def record_result(name: str, issue: str, passed: bool, message: str, skipped: bool = False):
+ results.append(TestResult(name, issue, passed, message, skipped))
+ if skipped:
+ print(f" SKIPPED: {message}")
+ elif passed:
+ print(f" PASSED: {message}")
+ else:
+ print(f" FAILED: {message}")
+
+
+# =============================================================================
+# TEST 1: ProxyConfig JSON Serialization (#1629)
+# =============================================================================
+async def test_proxy_config_serialization():
+ """
+ Verify BrowserConfig.to_dict() properly serializes ProxyConfig to JSON.
+
+ BEFORE: ProxyConfig was included as object, causing JSON serialization to fail
+ AFTER: ProxyConfig.to_dict() is called, producing valid JSON
+ """
+ print_test("ProxyConfig JSON Serialization", "#1629")
+
+ try:
+ from crawl4ai import BrowserConfig
+ from crawl4ai.async_configs import ProxyConfig
+
+ # Create config with ProxyConfig
+ proxy = ProxyConfig(
+ server="http://proxy.example.com:8080",
+ username="testuser",
+ password="testpass"
+ )
+ browser_config = BrowserConfig(headless=True, proxy_config=proxy)
+
+ # Test 1: to_dict() should return dict for proxy_config
+ config_dict = browser_config.to_dict()
+ proxy_dict = config_dict.get('proxy_config')
+
+ if not isinstance(proxy_dict, dict):
+ record_result("ProxyConfig Serialization", "#1629", False,
+ f"proxy_config is {type(proxy_dict)}, expected dict")
+ return
+
+ # Test 2: Should be JSON serializable
+ try:
+ json_str = json.dumps(config_dict)
+ json.loads(json_str) # Verify valid JSON
+ except (TypeError, json.JSONDecodeError) as e:
+ record_result("ProxyConfig Serialization", "#1629", False,
+ f"JSON serialization failed: {e}")
+ return
+
+ # Test 3: Verify proxy data is preserved
+ if proxy_dict.get('server') != "http://proxy.example.com:8080":
+ record_result("ProxyConfig Serialization", "#1629", False,
+ "Proxy server not preserved in serialization")
+ return
+
+ record_result("ProxyConfig Serialization", "#1629", True,
+ "BrowserConfig with ProxyConfig serializes to valid JSON")
+
+ except Exception as e:
+ record_result("ProxyConfig Serialization", "#1629", False, f"Exception: {e}")
+
+
+# =============================================================================
+# TEST 2: Configurable Backoff Parameters (#1269)
+# =============================================================================
+async def test_configurable_backoff():
+ """
+ Verify LLMConfig accepts and stores backoff configuration parameters.
+
+ BEFORE: Backoff was hardcoded (delay=2, attempts=3, factor=2)
+ AFTER: LLMConfig accepts backoff_base_delay, backoff_max_attempts, backoff_exponential_factor
+ """
+ print_test("Configurable Backoff Parameters", "#1269")
+
+ try:
+ from crawl4ai import LLMConfig
+
+ # Test 1: Default values
+ default_config = LLMConfig(provider="openai/gpt-4o-mini")
+
+ if default_config.backoff_base_delay != 2:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Default base_delay is {default_config.backoff_base_delay}, expected 2")
+ return
+
+ if default_config.backoff_max_attempts != 3:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Default max_attempts is {default_config.backoff_max_attempts}, expected 3")
+ return
+
+ if default_config.backoff_exponential_factor != 2:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Default exponential_factor is {default_config.backoff_exponential_factor}, expected 2")
+ return
+
+ # Test 2: Custom values
+ custom_config = LLMConfig(
+ provider="openai/gpt-4o-mini",
+ backoff_base_delay=5,
+ backoff_max_attempts=10,
+ backoff_exponential_factor=3
+ )
+
+ if custom_config.backoff_base_delay != 5:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Custom base_delay is {custom_config.backoff_base_delay}, expected 5")
+ return
+
+ if custom_config.backoff_max_attempts != 10:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Custom max_attempts is {custom_config.backoff_max_attempts}, expected 10")
+ return
+
+ if custom_config.backoff_exponential_factor != 3:
+ record_result("Configurable Backoff", "#1269", False,
+ f"Custom exponential_factor is {custom_config.backoff_exponential_factor}, expected 3")
+ return
+
+ # Test 3: to_dict() includes backoff params
+ config_dict = custom_config.to_dict()
+ if 'backoff_base_delay' not in config_dict:
+ record_result("Configurable Backoff", "#1269", False,
+ "backoff_base_delay missing from to_dict()")
+ return
+
+ record_result("Configurable Backoff", "#1269", True,
+ "LLMConfig accepts and stores custom backoff parameters")
+
+ except Exception as e:
+ record_result("Configurable Backoff", "#1269", False, f"Exception: {e}")
+
+
+# =============================================================================
+# TEST 3: LLM Strategy Input Format (#1178)
+# =============================================================================
+async def test_llm_input_format():
+ """
+ Verify LLMExtractionStrategy accepts input_format parameter.
+
+ BEFORE: Always used markdown input
+ AFTER: Supports "markdown", "html", "fit_markdown", "cleaned_html", "fit_html"
+ """
+ print_test("LLM Strategy Input Format", "#1178")
+
+ try:
+ from crawl4ai import LLMExtractionStrategy, LLMConfig
+
+ llm_config = LLMConfig(provider="openai/gpt-4o-mini")
+
+ # Test 1: Default is markdown
+ default_strategy = LLMExtractionStrategy(
+ llm_config=llm_config,
+ instruction="Extract data"
+ )
+
+ if default_strategy.input_format != "markdown":
+ record_result("LLM Input Format", "#1178", False,
+ f"Default input_format is '{default_strategy.input_format}', expected 'markdown'")
+ return
+
+ # Test 2: Can set to html
+ html_strategy = LLMExtractionStrategy(
+ llm_config=llm_config,
+ instruction="Extract data",
+ input_format="html"
+ )
+
+ if html_strategy.input_format != "html":
+ record_result("LLM Input Format", "#1178", False,
+ f"HTML input_format is '{html_strategy.input_format}', expected 'html'")
+ return
+
+ # Test 3: Can set to fit_markdown
+ fit_strategy = LLMExtractionStrategy(
+ llm_config=llm_config,
+ instruction="Extract data",
+ input_format="fit_markdown"
+ )
+
+ if fit_strategy.input_format != "fit_markdown":
+ record_result("LLM Input Format", "#1178", False,
+ f"fit_markdown input_format is '{fit_strategy.input_format}'")
+ return
+
+ record_result("LLM Input Format", "#1178", True,
+ "LLMExtractionStrategy accepts all input_format options")
+
+ except Exception as e:
+ record_result("LLM Input Format", "#1178", False, f"Exception: {e}")
+
+
+# =============================================================================
+# TEST 4: Raw HTML URL Variable (#1116)
+# =============================================================================
+async def test_raw_html_url_variable():
+ """
+ Verify that raw: prefix URLs pass "Raw HTML" to extraction strategy.
+
+ BEFORE: Entire HTML blob was passed as URL parameter
+ AFTER: "Raw HTML" string is passed as URL parameter
+ """
+ print_test("Raw HTML URL Variable", "#1116")
+
+ try:
+ from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
+ from crawl4ai.extraction_strategy import ExtractionStrategy
+
+ # Custom strategy to capture what URL is passed
+ class URLCapturingStrategy(ExtractionStrategy):
+ captured_url = None
+
+ def extract(self, url: str, html: str, *args, **kwargs):
+ URLCapturingStrategy.captured_url = url
+ return [{"content": "test"}]
+
+ html_content = "<html><body><h1>Test</h1></body></html>"
+ strategy = URLCapturingStrategy()
+
+ async with AsyncWebCrawler() as crawler:
+ result = await crawler.arun(
+ url=f"raw:{html_content}",
+ config=CrawlerRunConfig(
+ extraction_strategy=strategy
+ )
+ )
+
+ captured = URLCapturingStrategy.captured_url
+
+ if captured is None:
+ record_result("Raw HTML URL Variable", "#1116", False,
+ "Extraction strategy was not called")
+ return
+
+ if captured == html_content or captured.startswith("<"):
+ record_result("Raw HTML URL Variable", "#1116", False,
+ f"HTML blob was passed as URL: {captured[:60]}...")
+ elif captured != "Raw HTML":
+ record_result("Raw HTML URL Variable", "#1116", False,
+ f"Expected 'Raw HTML', got '{captured[:60]}'")
+ else:
+ record_result("Raw HTML URL Variable", "#1116", True,
+ "raw: input passes 'Raw HTML' as the URL parameter")
+
+ except Exception as e:
+ record_result("Raw HTML URL Variable", "#1116", False, f"Exception: {e}")
+
+
+# =============================================================================
+# TEST 11: Import Statement Formatting (#1181)
+# =============================================================================
+async def test_import_formatting():
+ """
+ Verify that consecutive import statements in code blocks keep their
+ line breaks in the generated markdown instead of being merged.
+ """
+ print_test("Import Statement Formatting", "#1181")
+
+ try:
+ from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
+
+ html_with_code = """<html><body><pre><code class="language-python">
+import os
+import sys
+from pathlib import Path
+from typing import List, Dict
+
+def main():
+ pass
+</code></pre></body></html>
+ """
+
+ async with AsyncWebCrawler() as crawler:
+ result = await crawler.arun(
+ url=f"raw:{html_with_code}",
+ config=CrawlerRunConfig()
+ )
+
+ markdown = result.markdown.raw_markdown if result.markdown else ""
+
+ # Check that imports are not concatenated on the same line
+ # Bad: "import osimport sys" (no newline between statements)
+ # This is the actual bug - statements getting merged on same line
+ bad_patterns = [
+ "import os import sys", # Space but no newline
+ "import osimport sys", # No space or newline
+ "import os from pathlib", # Space but no newline
+ "import osfrom pathlib", # No space or newline
+ ]
+
+ markdown_single_line = markdown.replace('\n', ' ') # Convert newlines to spaces
+
+ for pattern in bad_patterns:
+ # Check if pattern exists without proper line separation
+ if pattern.replace(' ', '') in markdown_single_line.replace(' ', ''):
+ # Verify it's actually on same line (not just adjacent after newline removal)
+ lines = markdown.split('\n')
+ for line in lines:
+ if 'import' in line.lower():
+ # Count import statements on this line
+ import_count = line.lower().count('import ')
+ if import_count > 1:
+ record_result("Import Formatting", "#1181", False,
+ f"Multiple imports on same line: {line[:60]}...")
+ return
+
+ # Verify imports are present
+ if "import" in markdown.lower():
+ record_result("Import Formatting", "#1181", True,
+ "Import statements are properly line-separated")
+ else:
+ record_result("Import Formatting", "#1181", True,
+ "No import statements found to verify (test HTML may have changed)",
+ skipped=True)
+
+ except Exception as e:
+ record_result("Import Formatting", "#1181", False, f"Exception: {e}")
+
+
+# =============================================================================
+# COMPREHENSIVE CRAWL TEST
+# =============================================================================
+async def test_comprehensive_crawl():
+ """
+ Run a comprehensive crawl to verify overall stability.
+ """
+ print_test("Comprehensive Crawl Test", "Overall")
+
+ try:
+ from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig
+
+ async with AsyncWebCrawler(config=BrowserConfig(headless=True)) as crawler:
+ result = await crawler.arun(
+ url="https://httpbin.org/html",
+ config=CrawlerRunConfig()
+ )
+
+ # Verify result
+ checks = []
+
+ if result.success:
+ checks.append("success=True")
+ else:
+ record_result("Comprehensive Crawl", "Overall", False,
+ f"Crawl failed: {result.error_message}")
+ return
+
+ if result.html and len(result.html) > 100:
+ checks.append(f"html={len(result.html)} chars")
+
+ if result.markdown and result.markdown.raw_markdown:
+ checks.append(f"markdown={len(result.markdown.raw_markdown)} chars")
+
+ if result.redirected_url:
+ checks.append("redirected_url present")
+
+ record_result("Comprehensive Crawl", "Overall", True,
+ f"All checks passed: {', '.join(checks)}")
+
+ except Exception as e:
+ record_result("Comprehensive Crawl", "Overall", False, f"Exception: {e}")
+
+
+# =============================================================================
+# MAIN
+# =============================================================================
+
+def print_summary():
+ """Print test results summary"""
+ print_header("TEST RESULTS SUMMARY")
+
+ passed = sum(1 for r in results if r.passed and not r.skipped)
+ failed = sum(1 for r in results if not r.passed and not r.skipped)
+ skipped = sum(1 for r in results if r.skipped)
+
+ print(f"\nTotal: {len(results)} tests")
+ print(f" Passed: {passed}")
+ print(f" Failed: {failed}")
+ print(f" Skipped: {skipped}")
+
+ if failed > 0:
+ print("\nFailed Tests:")
+ for r in results:
+ if not r.passed and not r.skipped:
+ print(f" - {r.name} ({r.issue}): {r.message}")
+
+ if skipped > 0:
+ print("\nSkipped Tests:")
+ for r in results:
+ if r.skipped:
+ print(f" - {r.name} ({r.issue}): {r.message}")
+
+ print("\n" + "=" * 70)
+ if failed == 0:
+ print("All tests passed! v0.7.8 bug fixes verified.")
+ else:
+ print(f"WARNING: {failed} test(s) failed!")
+ print("=" * 70)
+
+ return failed == 0
+
+
+async def main():
+ """Run all verification tests"""
+ print_header("Crawl4AI v0.7.8 - Bug Fix Verification Tests")
+ print("Running actual tests to verify bug fixes...")
+
+ # Run all tests
+ tests = [
+ test_proxy_config_serialization, # #1629
+ test_configurable_backoff, # #1269
+ test_llm_input_format, # #1178
+ test_raw_html_url_variable, # #1116
+ test_redirect_url_handling, # #1268
+ test_pypdf_migration, # #1412
+ test_pydantic_configdict, # #678
+ test_docker_content_filter, # #1642
+ test_docker_cache_permissions, # #1638
+ test_adaptive_crawler_embedding, # #1621
+ test_import_formatting, # #1181
+ test_comprehensive_crawl, # Overall
+ ]
+
+ for test_func in tests:
+ try:
+ await test_func()
+ except Exception as e:
+ print(f"\nTest {test_func.__name__} crashed: {e}")
+ results.append(TestResult(
+ test_func.__name__,
+ "Unknown",
+ False,
+ f"Crashed: {e}"
+ ))
+
+ # Print summary
+ all_passed = print_summary()
+
+ return 0 if all_passed else 1
+
+
+if __name__ == "__main__":
+ try:
+ exit_code = asyncio.run(main())
+ sys.exit(exit_code)
+ except KeyboardInterrupt:
+ print("\n\nTests interrupted by user.")
+ sys.exit(1)
+ except Exception as e:
+ print(f"\n\nTest suite failed: {e}")
+ import traceback
+ traceback.print_exc()
+ sys.exit(1)
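
Aside: the backoff and input-format tests above pin down the new LLMConfig and LLMExtractionStrategy parameters. A minimal usage sketch outside the demo, assuming the retry semantics implied by the parameter names and the defaults the tests check (base delay in seconds, multiplied by the exponential factor on each attempt):

    from crawl4ai import LLMConfig, LLMExtractionStrategy

    # Retry up to 10 times, starting at a 5s delay and tripling it per attempt (#1269).
    llm_config = LLMConfig(
        provider="openai/gpt-4o-mini",
        backoff_base_delay=5,
        backoff_max_attempts=10,
        backoff_exponential_factor=3,
    )

    strategy = LLMExtractionStrategy(
        llm_config=llm_config,
        instruction="Extract the article title and author",
        input_format="fit_markdown",  # #1178: also "markdown", "html", "cleaned_html", "fit_html"
    )
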
diff --git a/pyproject.toml b/pyproject.toml
index faa545bc..06d1e4ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -59,13 +59,13 @@ classifiers = [
]
[project.optional-dependencies]
-pdf = ["PyPDF2"]
+pdf = ["pypdf"]
torch = ["torch", "nltk", "scikit-learn"]
transformer = ["transformers", "tokenizers", "sentence-transformers"]
cosine = ["torch", "transformers", "nltk", "sentence-transformers"]
sync = ["selenium"]
all = [
- "PyPDF2",
+ "pypdf",
"torch",
"nltk",
"scikit-learn",
diff --git a/requirements.txt b/requirements.txt
index 24b243ef..7d92cbea 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -33,4 +33,4 @@ shapely>=2.0.0
fake-useragent>=2.2.0
pdf2image>=1.17.0
-PyPDF2>=3.0.1
\ No newline at end of file
+pypdf>=6.0.0
\ No newline at end of file
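
For downstream code that imported PyPDF2 directly, the migration is mechanical: only the package name changes, and the modern reader API is shared by both. A sketch, assuming the common PdfReader usage:

    # Before (PyPDF2, unmaintained):
    #   from PyPDF2 import PdfReader
    # After (pypdf >= 6.0.0):
    from pypdf import PdfReader

    reader = PdfReader("example.pdf")
    print(len(reader.pages))               # page count
    print(reader.pages[0].extract_text())  # text of the first page
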
diff --git a/tests/async/test_redirect_url_resolution.py b/tests/async/test_redirect_url_resolution.py
new file mode 100644
index 00000000..cce3e512
--- /dev/null
+++ b/tests/async/test_redirect_url_resolution.py
@@ -0,0 +1,118 @@
+"""Test delayed redirect WITH wait_for - does link resolution use correct URL?"""
+import asyncio
+import threading
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+
+class RedirectTestHandler(SimpleHTTPRequestHandler):
+ def log_message(self, format, *args):
+ pass
+
+ def do_GET(self):
+ if self.path == "/page-a":
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ content = """<html>
+<head>
+<title>Page A</title>
+</head>
+<body>
+<h1>Page A - Will redirect after 200ms</h1>
+<script>
+setTimeout(function () { window.location.href = "/redirect-target/"; }, 200);
+</script>
+</body>
+</html>
+ """
+ self.wfile.write(content.encode())
+ elif self.path.startswith("/redirect-target"):
+ self.send_response(200)
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ content = """<html>
+<head>
+<title>Redirect Target</title>
+</head>
+<body>
+<h1>Redirect Target</h1>
+<nav id="target-nav">
+<a href="subpage1">Subpage 1</a>
+<a href="subpage2">Subpage 2</a>
+</nav>
+</body>
+</html>
+ """
+ self.wfile.write(content.encode())
+ else:
+ self.send_response(404)
+ self.end_headers()
+
+async def main():
+ import socket
+ class ReuseAddrHTTPServer(HTTPServer):
+ allow_reuse_address = True
+
+ server = ReuseAddrHTTPServer(("localhost", 8769), RedirectTestHandler)
+ thread = threading.Thread(target=server.serve_forever)
+ thread.daemon = True
+ thread.start()
+
+ try:
+ import sys
+ sys.path.insert(0, '/Users/nasrin/vscode/c4ai-uc/develop')
+ from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig
+
+ print("=" * 60)
+ print("TEST: Delayed JS redirect WITH wait_for='css:#target-nav'")
+ print("This waits for the redirect to complete")
+ print("=" * 60)
+
+ browser_config = BrowserConfig(headless=True, verbose=False)
+ crawl_config = CrawlerRunConfig(
+ cache_mode="bypass",
+ wait_for="css:#target-nav" # Wait for element on redirect target
+ )
+
+ async with AsyncWebCrawler(config=browser_config) as crawler:
+ result = await crawler.arun(
+ url="http://localhost:8769/page-a",
+ config=crawl_config
+ )
+
+ print(f"Original URL: http://localhost:8769/page-a")
+ print(f"Redirected URL returned: {result.redirected_url}")
+ print(f"HTML contains 'Redirect Target': {'Redirect Target' in result.html}")
+ print()
+
+ if "/redirect-target" in (result.redirected_url or ""):
+ print("โ redirected_url is CORRECT")
+ else:
+ print("โ BUG #1: redirected_url is WRONG - still shows original URL!")
+
+ # Check links
+ all_links = []
+ if isinstance(result.links, dict):
+ all_links = result.links.get("internal", []) + result.links.get("external", [])
+
+ print(f"\nLinks found ({len(all_links)} total):")
+ bug_found = False
+ for link in all_links:
+ href = link.get("href", "") if isinstance(link, dict) else getattr(link, 'href', "")
+ if "subpage" in href:
+ print(f" {href}")
+ if "/page-a/" in href:
+ print(" ^^^ BUG #2: Link resolved with WRONG base URL!")
+ bug_found = True
+ elif "/redirect-target/" in href:
+ print(" ^^^ CORRECT")
+
+ if not bug_found and all_links:
+ print("\nโ Link resolution is CORRECT")
+
+ finally:
+ server.shutdown()
+
+if __name__ == "__main__":
+ asyncio.run(main())
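
The contract this test pins down generalizes to user code: after #1268, relative links in result.links are resolved against the final post-redirect URL, and callers resolving URLs themselves should use the same base. A short sketch, assuming result.url holds the originally requested URL:

    from urllib.parse import urljoin

    def resolve_hrefs(result, hrefs):
        # Prefer the post-redirect URL as the base; fall back to the request URL.
        base = result.redirected_url or result.url
        return [urljoin(base, href) for href in hrefs]
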
diff --git a/tests/check_dependencies.py b/tests/check_dependencies.py
index e47ec372..5216e2cc 100755
--- a/tests/check_dependencies.py
+++ b/tests/check_dependencies.py
@@ -71,7 +71,7 @@ PACKAGE_MAPPINGS = {
'sentence_transformers': 'sentence-transformers',
'rank_bm25': 'rank-bm25',
'snowballstemmer': 'snowballstemmer',
- 'PyPDF2': 'PyPDF2',
+ 'pypdf': 'pypdf',
'pdf2image': 'pdf2image',
}
diff --git a/tests/docker/test_filter_deep_crawl.py b/tests/docker/test_filter_deep_crawl.py
index 4ee0df40..9e82073c 100644
--- a/tests/docker/test_filter_deep_crawl.py
+++ b/tests/docker/test_filter_deep_crawl.py
@@ -1,16 +1,31 @@
"""
Test the complete fix for both the filter serialization and JSON serialization issues.
"""
+import os
+import traceback
+from typing import Any
import asyncio
import httpx
from crawl4ai import BrowserConfig, CacheMode, CrawlerRunConfig
-from crawl4ai.deep_crawling import BFSDeepCrawlStrategy, FilterChain, URLPatternFilter
+from crawl4ai.deep_crawling import (
+ BFSDeepCrawlStrategy,
+ ContentRelevanceFilter,
+ FilterChain,
+ URLFilter,
+ URLPatternFilter,
+)
-BASE_URL = "http://localhost:11234/" # Adjust port as needed
+CRAWL4AI_DOCKER_PORT = os.environ.get("CRAWL4AI_DOCKER_PORT", "11234")
+try:
+ BASE_PORT = int(CRAWL4AI_DOCKER_PORT)
+except ValueError:
+ BASE_PORT = 11234
+BASE_URL = f"http://localhost:{BASE_PORT}/" # Adjust port as needed
-async def test_with_docker_client():
+
+async def test_with_docker_client(filter_chain: list[URLFilter], max_pages: int = 20, timeout: int = 30) -> bool:
"""Test using the Docker client (same as 1419.py)."""
from crawl4ai.docker_client import Crawl4aiDockerClient
@@ -24,19 +39,10 @@ async def test_with_docker_client():
verbose=True,
) as client:
- # Create filter chain - testing the serialization fix
- filter_chain = [
- URLPatternFilter(
- # patterns=["*about*", "*privacy*", "*terms*"],
- patterns=["*advanced*"],
- reverse=True
- ),
- ]
-
crawler_config = CrawlerRunConfig(
deep_crawl_strategy=BFSDeepCrawlStrategy(
max_depth=2, # Keep it shallow for testing
- # max_pages=5, # Limit pages for testing
+ max_pages=max_pages, # Limit pages for testing
filter_chain=FilterChain(filter_chain)
),
cache_mode=CacheMode.BYPASS,
@@ -47,6 +53,7 @@ async def test_with_docker_client():
["https://docs.crawl4ai.com"], # Simple test page
browser_config=BrowserConfig(headless=True),
crawler_config=crawler_config,
+ hooks_timeout=timeout,
)
if results:
@@ -74,12 +81,11 @@ async def test_with_docker_client():
except Exception as e:
print(f"โ Docker client test failed: {e}")
- import traceback
traceback.print_exc()
return False
-async def test_with_rest_api():
+async def test_with_rest_api(filters: list[dict[str, Any]], max_pages: int = 20, timeout: int = 30) -> bool:
"""Test using REST API directly."""
print("\n" + "=" * 60)
print("Testing with REST API")
@@ -90,19 +96,11 @@ async def test_with_rest_api():
"type": "BFSDeepCrawlStrategy",
"params": {
"max_depth": 2,
- # "max_pages": 5,
+ "max_pages": max_pages,
"filter_chain": {
"type": "FilterChain",
"params": {
- "filters": [
- {
- "type": "URLPatternFilter",
- "params": {
- "patterns": ["*advanced*"],
- "reverse": True
- }
- }
- ]
+ "filters": filters
}
}
}
@@ -126,7 +124,7 @@ async def test_with_rest_api():
response = await client.post(
f"{BASE_URL}crawl",
json=crawl_payload,
- timeout=30
+ timeout=timeout,
)
if response.status_code == 200:
@@ -150,7 +148,6 @@ async def test_with_rest_api():
except Exception as e:
print(f"โ REST API test failed: {e}")
- import traceback
traceback.print_exc()
return False
@@ -165,12 +162,62 @@ async def main():
results = []
# Test 1: Docker client
- docker_passed = await test_with_docker_client()
- results.append(("Docker Client", docker_passed))
+ max_pages_ = [20, 5]
+ timeouts = [30, 60]
+ filter_chain_test_cases = [
+ [
+ URLPatternFilter(
+ # patterns=["*about*", "*privacy*", "*terms*"],
+ patterns=["*advanced*"],
+ reverse=True
+ ),
+ ],
+ [
+ ContentRelevanceFilter(
+ query="about faq",
+ threshold=0.2,
+ ),
+ ],
+ ]
+ for idx, (filter_chain, max_pages, timeout) in enumerate(zip(filter_chain_test_cases, max_pages_, timeouts)):
+ docker_passed = await test_with_docker_client(filter_chain=filter_chain, max_pages=max_pages, timeout=timeout)
+ results.append((f"Docker Client w/ filter chain {idx}", docker_passed))
# Test 2: REST API
- rest_passed = await test_with_rest_api()
- results.append(("REST API", rest_passed))
+ max_pages_ = [20, 5, 5]
+ timeouts = [30, 60, 60]
+ filters_test_cases = [
+ [
+ {
+ "type": "URLPatternFilter",
+ "params": {
+ "patterns": ["*advanced*"],
+ "reverse": True
+ }
+ }
+ ],
+ [
+ {
+ "type": "ContentRelevanceFilter",
+ "params": {
+ "query": "about faq",
+ "threshold": 0.2,
+ }
+ }
+ ],
+ [
+ {
+ "type": "ContentRelevanceFilter",
+ "params": {
+ "query": ["about", "faq"],
+ "threshold": 0.2,
+ }
+ }
+ ],
+ ]
+ for idx, (filters, max_pages, timeout) in enumerate(zip(filters_test_cases, max_pages_, timeouts)):
+ rest_passed = await test_with_rest_api(filters=filters, max_pages=max_pages, timeout=timeout)
+ results.append((f"REST API w/ filters {idx}", rest_passed))
# Summary
print("\n" + "=" * 60)
@@ -186,10 +233,7 @@ async def main():
print("=" * 60)
if all_passed:
- print("๐ ALL TESTS PASSED! Both issues are fully resolved!")
- print("\nThe fixes:")
- print("1. Filter serialization: Fixed by not serializing private __slots__")
- print("2. JSON serialization: Fixed by removing property descriptors from model_dump()")
+ print("๐ ALL TESTS PASSED!")
else:
print("โ ๏ธ Some tests failed. Please check the server logs for details.")
@@ -198,4 +242,4 @@ async def main():
if __name__ == "__main__":
import sys
- sys.exit(asyncio.run(main()))
\ No newline at end of file
+ sys.exit(asyncio.run(main()))
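
The ContentRelevanceFilter configurations these Docker tests send over REST can also be built directly in Python. A minimal local sketch mirroring the second test case above (all names taken from the imports and payloads in this file):

    import asyncio
    from crawl4ai import AsyncWebCrawler, CacheMode, CrawlerRunConfig
    from crawl4ai.deep_crawling import (
        BFSDeepCrawlStrategy,
        ContentRelevanceFilter,
        FilterChain,
    )

    async def crawl_relevant_pages():
        config = CrawlerRunConfig(
            deep_crawl_strategy=BFSDeepCrawlStrategy(
                max_depth=2,
                max_pages=5,  # keep test runs short
                filter_chain=FilterChain([
                    ContentRelevanceFilter(query="about faq", threshold=0.2),
                ]),
            ),
            cache_mode=CacheMode.BYPASS,
        )
        async with AsyncWebCrawler() as crawler:
            return await crawler.arun(url="https://docs.crawl4ai.com", config=config)

    # asyncio.run(crawl_relevant_pages())
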
diff --git a/tests/general/test_async_webcrawler.py b/tests/general/test_async_webcrawler.py
index 4d7aa815..80d4acbe 100644
--- a/tests/general/test_async_webcrawler.py
+++ b/tests/general/test_async_webcrawler.py
@@ -9,6 +9,21 @@ from crawl4ai import (
RateLimiter,
CacheMode
)
+from crawl4ai.extraction_strategy import ExtractionStrategy
+
+class MockExtractionStrategy(ExtractionStrategy):
+ """Mock extraction strategy for testing URL parameter handling"""
+
+ def __init__(self):
+ super().__init__()
+ self.run_calls = []
+
+ def extract(self, url: str, html: str, *args, **kwargs):
+ return [{"test": "data"}]
+
+ def run(self, url: str, sections: List[str], *args, **kwargs):
+ self.run_calls.append(url)
+ return super().run(url, sections, *args, **kwargs)
@pytest.mark.asyncio
@pytest.mark.parametrize("viewport", [
@@ -142,8 +157,72 @@ async def test_error_handling(error_url):
assert not result.success
assert result.error_message is not None
+@pytest.mark.asyncio
+async def test_extraction_strategy_run_with_regular_url():
+ """
+ Regression test for extraction_strategy.run URL parameter handling with regular URLs.
+
+ This test verifies that when is_raw_html=False (regular URL),
+ extraction_strategy.run is called with the actual URL.
+ """
+ browser_config = BrowserConfig(
+ browser_type="chromium",
+ headless=True
+ )
+
+ async with AsyncWebCrawler(config=browser_config) as crawler:
+ mock_strategy = MockExtractionStrategy()
+
+ # Test regular URL (is_raw_html=False)
+ regular_url = "https://example.com"
+ result = await crawler.arun(
+ url=regular_url,
+ config=CrawlerRunConfig(
+ page_timeout=30000,
+ extraction_strategy=mock_strategy,
+ cache_mode=CacheMode.BYPASS
+ )
+ )
+
+ assert result.success
+ assert len(mock_strategy.run_calls) == 1
+ assert mock_strategy.run_calls[0] == regular_url, f"Expected '{regular_url}', got '{mock_strategy.run_calls[0]}'"
+
+@pytest.mark.asyncio
+async def test_extraction_strategy_run_with_raw_html():
+ """
+ Regression test for extraction_strategy.run URL parameter handling with raw HTML.
+
+ This test verifies that when is_raw_html=True (URL starts with "raw:"),
+ extraction_strategy.run is called with "Raw HTML" instead of the actual URL.
+ """
+ browser_config = BrowserConfig(
+ browser_type="chromium",
+ headless=True
+ )
+
+ async with AsyncWebCrawler(config=browser_config) as crawler:
+ mock_strategy = MockExtractionStrategy()
+
+ # Test raw HTML URL (is_raw_html=True automatically set)
+ raw_html_url = "raw:Test HTML This is a test.
"
+ result = await crawler.arun(
+ url=raw_html_url,
+ config=CrawlerRunConfig(
+ page_timeout=30000,
+ extraction_strategy=mock_strategy,
+ cache_mode=CacheMode.BYPASS
+ )
+ )
+
+ assert result.success
+ assert len(mock_strategy.run_calls) == 1
+ assert mock_strategy.run_calls[0] == "Raw HTML", f"Expected 'Raw HTML', got '{mock_strategy.run_calls[0]}'"
+
if __name__ == "__main__":
asyncio.run(test_viewport_config((1024, 768)))
asyncio.run(test_memory_management())
asyncio.run(test_rate_limiting())
- asyncio.run(test_javascript_execution())
\ No newline at end of file
+ asyncio.run(test_javascript_execution())
+ asyncio.run(test_extraction_strategy_run_with_regular_url())
+ asyncio.run(test_extraction_strategy_run_with_raw_html())
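
Taken together, the two regression tests spell out a simple contract for the raw: prefix. A condensed sketch of what callers can now rely on:

    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

    async def crawl_raw_html():
        async with AsyncWebCrawler() as crawler:
            # "raw:" skips fetching; the HTML after the prefix is parsed directly.
            result = await crawler.arun(
                url="raw:<html><body><h1>Hello</h1></body></html>",
                config=CrawlerRunConfig(),
            )
            # Any attached extraction strategy receives the literal string
            # "Raw HTML" as its url argument, never the HTML blob itself (#1116).
            print(result.markdown.raw_markdown)

    asyncio.run(crawl_raw_html())
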