From 27af4cc27bafa5bfa60849c5c124eb3ae47ec987 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Martins?= <11438285+jl-martins@users.noreply.github.com> Date: Sat, 15 Feb 2025 15:34:59 +0000 Subject: [PATCH 01/11] Fix "raw://" URL parsing logic Closes https://github.com/unclecode/crawl4ai/issues/686 --- crawl4ai/async_crawler_strategy.py | 4 +-- tests/20241401/test_async_crawler_strategy.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/crawl4ai/async_crawler_strategy.py b/crawl4ai/async_crawler_strategy.py index 62ee4c65..5e3c2519 100644 --- a/crawl4ai/async_crawler_strategy.py +++ b/crawl4ai/async_crawler_strategy.py @@ -1231,9 +1231,9 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): get_delayed_content=None, ) - elif url.startswith("raw:") or url.startswith("raw://"): + elif url.startswith("raw:"): # Process raw HTML content - raw_html = url[4:] if url[:4] == "raw:" else url[7:] + raw_html = url[6:] if url.startswith("raw://") else url[4:] html = raw_html if config.screenshot: screenshot_data = await self._generate_screenshot_from_html(html) diff --git a/tests/20241401/test_async_crawler_strategy.py b/tests/20241401/test_async_crawler_strategy.py index 68fe4a88..8426fe0a 100644 --- a/tests/20241401/test_async_crawler_strategy.py +++ b/tests/20241401/test_async_crawler_strategy.py @@ -15,6 +15,24 @@ CRAWL4AI_HOME_DIR = Path(os.path.expanduser("~")).joinpath(".crawl4ai") if not CRAWL4AI_HOME_DIR.joinpath("profiles", "test_profile").exists(): CRAWL4AI_HOME_DIR.joinpath("profiles", "test_profile").mkdir(parents=True) +@pytest.fixture +def basic_html(): + return """ + + + Basic HTML + + +

Main Heading

+
+
+

Basic HTML document for testing purposes.

+
+
+ + + """ + # Test Config Files @pytest.fixture def basic_browser_config(): @@ -325,6 +343,13 @@ async def test_stealth_mode(crawler_strategy): ) assert response.status_code == 200 +@pytest.mark.asyncio +@pytest.mark.parametrize("prefix", ("raw:", "raw://")) +async def test_raw_urls(crawler_strategy, basic_html, prefix): + url = f"{prefix}{basic_html}" + response = await crawler_strategy.crawl(url, CrawlerRunConfig()) + assert response.html == basic_html + # Error Handling Tests @pytest.mark.asyncio async def test_invalid_url(): From cc95d3abd4c11a67a027c8a12621f404251f43c9 Mon Sep 17 00:00:00 2001 From: ntohidi Date: Tue, 3 Jun 2025 11:19:08 +0200 Subject: [PATCH 02/11] Fix raw URL parsing logic to correctly handle "raw://" and "raw:" prefixes. REF #1118 --- crawl4ai/async_crawler_strategy.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crawl4ai/async_crawler_strategy.py b/crawl4ai/async_crawler_strategy.py index 88d94a46..a1873bfd 100644 --- a/crawl4ai/async_crawler_strategy.py +++ b/crawl4ai/async_crawler_strategy.py @@ -466,8 +466,14 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): console_messages=captured_console, ) - elif url.startswith("raw:"): + ##### + # Since both "raw:" and "raw://" start with "raw:", the first condition is always true for both, so "raw://" will be sliced as "//...", which is incorrect. 
+ # Fix: Check for "raw://" first, then "raw:" + # Also, the prefix "raw://" is actually 6 characters long, not 7, so it should be sliced accordingly: url[6:] + ##### + elif url.startswith("raw://") or url.startswith("raw:"): # Process raw HTML content + # raw_html = url[4:] if url[:4] == "raw:" else url[7:] raw_html = url[6:] if url.startswith("raw://") else url[4:] html = raw_html if config.screenshot: From fcc2abe4db7c2dda375f443281fcb5b5eff9039c Mon Sep 17 00:00:00 2001 From: ntohidi Date: Tue, 3 Jun 2025 12:53:59 +0200 Subject: [PATCH 03/11] (fix): Update document about LLM extraction strategy to use LLMConfig. REF #1146 --- docs/md_v2/extraction/llm-strategies.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/md_v2/extraction/llm-strategies.md b/docs/md_v2/extraction/llm-strategies.md index 9f6a6b3e..7c488094 100644 --- a/docs/md_v2/extraction/llm-strategies.md +++ b/docs/md_v2/extraction/llm-strategies.md @@ -218,7 +218,7 @@ import json import asyncio from typing import List from pydantic import BaseModel, Field -from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode +from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig from crawl4ai.extraction_strategy import LLMExtractionStrategy class Entity(BaseModel): @@ -238,8 +238,8 @@ class KnowledgeGraph(BaseModel): async def main(): # LLM extraction strategy llm_strat = LLMExtractionStrategy( - llmConfig = LlmConfig(provider="openai/gpt-4", api_token=os.getenv('OPENAI_API_KEY')), - schema=KnowledgeGraph.schema_json(), + llmConfig = LLMConfig(provider="openai/gpt-4", api_token=os.getenv('OPENAI_API_KEY')), + schema=KnowledgeGraph.model_json_schema(), extraction_type="schema", instruction="Extract entities and relationships from the content. 
Return valid JSON.", chunk_token_threshold=1400, @@ -258,6 +258,10 @@ async def main(): url = "https://www.nbcnews.com/business" result = await crawler.arun(url=url, config=crawl_config) + print("--- LLM RAW RESPONSE ---") + print(result.extracted_content) + print("--- END LLM RAW RESPONSE ---") + if result.success: with open("kb_result.json", "w", encoding="utf-8") as f: f.write(result.extracted_content) From 022cc2d92aabc1f9d64bc5f49b1fd442d10f66be Mon Sep 17 00:00:00 2001 From: Markus Zimmermann Date: Thu, 5 Jun 2025 15:30:38 +0200 Subject: [PATCH 04/11] fix, Typo --- deploy/docker/c4ai-doc-context.md | 2 +- docs/md_v2/api/parameters.md | 2 +- docs/md_v2/core/browser-crawler-config.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deploy/docker/c4ai-doc-context.md b/deploy/docker/c4ai-doc-context.md index 1642f85e..ad7bff8e 100644 --- a/deploy/docker/c4ai-doc-context.md +++ b/deploy/docker/c4ai-doc-context.md @@ -332,7 +332,7 @@ The `clone()` method: ### Key fields to note 1. **`provider`**: -- Which LLM provoder to use. +- Which LLM provider to use. - Possible values are `"ollama/llama3","groq/llama3-70b-8192","groq/llama3-8b-8192", "openai/gpt-4o-mini" ,"openai/gpt-4o","openai/o1-mini","openai/o1-preview","openai/o3-mini","openai/o3-mini-high","anthropic/claude-3-haiku-20240307","anthropic/claude-3-opus-20240229","anthropic/claude-3-sonnet-20240229","anthropic/claude-3-5-sonnet-20240620","gemini/gemini-pro","gemini/gemini-1.5-pro","gemini/gemini-2.0-flash","gemini/gemini-2.0-flash-exp","gemini/gemini-2.0-flash-lite-preview-02-05","deepseek/deepseek-chat"`
*(default: `"openai/gpt-4o-mini"`)* 2. **`api_token`**: diff --git a/docs/md_v2/api/parameters.md b/docs/md_v2/api/parameters.md index c7ac21ae..eec25480 100644 --- a/docs/md_v2/api/parameters.md +++ b/docs/md_v2/api/parameters.md @@ -259,7 +259,7 @@ LLMConfig is useful to pass LLM provider config to strategies and functions that ## 3.1 Parameters | **Parameter** | **Type / Default** | **What It Does** | |-----------------------|----------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------| -| **`provider`** | `"ollama/llama3","groq/llama3-70b-8192","groq/llama3-8b-8192", "openai/gpt-4o-mini" ,"openai/gpt-4o","openai/o1-mini","openai/o1-preview","openai/o3-mini","openai/o3-mini-high","anthropic/claude-3-haiku-20240307","anthropic/claude-3-opus-20240229","anthropic/claude-3-sonnet-20240229","anthropic/claude-3-5-sonnet-20240620","gemini/gemini-pro","gemini/gemini-1.5-pro","gemini/gemini-2.0-flash","gemini/gemini-2.0-flash-exp","gemini/gemini-2.0-flash-lite-preview-02-05","deepseek/deepseek-chat"`
*(default: `"openai/gpt-4o-mini"`)* | Which LLM provoder to use. +| **`provider`** | `"ollama/llama3","groq/llama3-70b-8192","groq/llama3-8b-8192", "openai/gpt-4o-mini" ,"openai/gpt-4o","openai/o1-mini","openai/o1-preview","openai/o3-mini","openai/o3-mini-high","anthropic/claude-3-haiku-20240307","anthropic/claude-3-opus-20240229","anthropic/claude-3-sonnet-20240229","anthropic/claude-3-5-sonnet-20240620","gemini/gemini-pro","gemini/gemini-1.5-pro","gemini/gemini-2.0-flash","gemini/gemini-2.0-flash-exp","gemini/gemini-2.0-flash-lite-preview-02-05","deepseek/deepseek-chat"`
*(default: `"openai/gpt-4o-mini"`)* | Which LLM provider to use. | **`api_token`** |1.Optional. When not provided explicitly, api_token will be read from environment variables based on provider. For example: If a gemini model is passed as provider then,`"GEMINI_API_KEY"` will be read from environment variables
2. API token of LLM provider
eg: `api_token = "gsk_1ClHGGJ7Lpn4WGybR7vNWGdyb3FY7zXEw3SCiy0BAVM9lL8CQv"`
3. Environment variable - use with prefix "env:"
eg:`api_token = "env: GROQ_API_KEY"` | API token to use for the given provider | **`base_url`** |Optional. Custom API endpoint | If your provider has a custom endpoint diff --git a/docs/md_v2/core/browser-crawler-config.md b/docs/md_v2/core/browser-crawler-config.md index 9ea8f2a1..a788152c 100644 --- a/docs/md_v2/core/browser-crawler-config.md +++ b/docs/md_v2/core/browser-crawler-config.md @@ -252,7 +252,7 @@ The `clone()` method: ### Key fields to note 1. **`provider`**: -- Which LLM provoder to use. +- Which LLM provider to use. - Possible values are `"ollama/llama3","groq/llama3-70b-8192","groq/llama3-8b-8192", "openai/gpt-4o-mini" ,"openai/gpt-4o","openai/o1-mini","openai/o1-preview","openai/o3-mini","openai/o3-mini-high","anthropic/claude-3-haiku-20240307","anthropic/claude-3-opus-20240229","anthropic/claude-3-sonnet-20240229","anthropic/claude-3-5-sonnet-20240620","gemini/gemini-pro","gemini/gemini-1.5-pro","gemini/gemini-2.0-flash","gemini/gemini-2.0-flash-exp","gemini/gemini-2.0-flash-lite-preview-02-05","deepseek/deepseek-chat"`
*(default: `"openai/gpt-4o-mini"`)* 2. **`api_token`**: From 5ac19a61d7bb052b78eb60fbfd08ffadec687c59 Mon Sep 17 00:00:00 2001 From: ntohidi Date: Thu, 5 Jun 2025 16:40:34 +0200 Subject: [PATCH 05/11] feat: Implement max_scroll_steps parameter for full page scanning. ref: #1168 --- crawl4ai/async_configs.py | 6 ++ crawl4ai/async_crawler_strategy.py | 19 ++++- tests/general/test_max_scroll.py | 115 +++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 2 deletions(-) create mode 100644 tests/general/test_max_scroll.py diff --git a/crawl4ai/async_configs.py b/crawl4ai/async_configs.py index 3fcd9911..e6cf9279 100644 --- a/crawl4ai/async_configs.py +++ b/crawl4ai/async_configs.py @@ -789,6 +789,8 @@ class CrawlerRunConfig(): Default: False. scroll_delay (float): Delay in seconds between scroll steps if scan_full_page is True. Default: 0.2. + max_scroll_steps (Optional[int]): Maximum number of scroll steps to perform during full page scan. + If None, scrolls until the entire page is loaded. Default: None. process_iframes (bool): If True, attempts to process and inline iframe content. Default: False. remove_overlay_elements (bool): If True, remove overlays/popups before extracting HTML. 
@@ -919,6 +921,7 @@ class CrawlerRunConfig(): ignore_body_visibility: bool = True, scan_full_page: bool = False, scroll_delay: float = 0.2, + max_scroll_steps: Optional[int] = None, process_iframes: bool = False, remove_overlay_elements: bool = False, simulate_user: bool = False, @@ -1017,6 +1020,7 @@ class CrawlerRunConfig(): self.ignore_body_visibility = ignore_body_visibility self.scan_full_page = scan_full_page self.scroll_delay = scroll_delay + self.max_scroll_steps = max_scroll_steps self.process_iframes = process_iframes self.remove_overlay_elements = remove_overlay_elements self.simulate_user = simulate_user @@ -1158,6 +1162,7 @@ class CrawlerRunConfig(): ignore_body_visibility=kwargs.get("ignore_body_visibility", True), scan_full_page=kwargs.get("scan_full_page", False), scroll_delay=kwargs.get("scroll_delay", 0.2), + max_scroll_steps=kwargs.get("max_scroll_steps"), process_iframes=kwargs.get("process_iframes", False), remove_overlay_elements=kwargs.get("remove_overlay_elements", False), simulate_user=kwargs.get("simulate_user", False), @@ -1267,6 +1272,7 @@ class CrawlerRunConfig(): "ignore_body_visibility": self.ignore_body_visibility, "scan_full_page": self.scan_full_page, "scroll_delay": self.scroll_delay, + "max_scroll_steps": self.max_scroll_steps, "process_iframes": self.process_iframes, "remove_overlay_elements": self.remove_overlay_elements, "simulate_user": self.simulate_user, diff --git a/crawl4ai/async_crawler_strategy.py b/crawl4ai/async_crawler_strategy.py index a1873bfd..d349388f 100644 --- a/crawl4ai/async_crawler_strategy.py +++ b/crawl4ai/async_crawler_strategy.py @@ -902,7 +902,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): # Handle full page scanning if config.scan_full_page: - await self._handle_full_page_scan(page, config.scroll_delay) + # await self._handle_full_page_scan(page, config.scroll_delay) + await self._handle_full_page_scan(page, config.scroll_delay, config.max_scroll_steps) # Execute JavaScript if 
provided # if config.js_code: @@ -1090,7 +1091,8 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): # Close the page await page.close() - async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1): + # async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1): + async def _handle_full_page_scan(self, page: Page, scroll_delay: float = 0.1, max_scroll_steps: Optional[int] = None): """ Helper method to handle full page scanning. @@ -1105,6 +1107,7 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): Args: page (Page): The Playwright page object scroll_delay (float): The delay between page scrolls + max_scroll_steps (Optional[int]): Maximum number of scroll steps to perform. If None, scrolls until end. """ try: @@ -1129,9 +1132,21 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): dimensions = await self.get_page_dimensions(page) total_height = dimensions["height"] + scroll_step_count = 0 while current_position < total_height: + #### + # NEW FEATURE: Check if we've reached the maximum allowed scroll steps + # This prevents infinite scrolling on very long pages or infinite scroll scenarios + # If max_scroll_steps is None, this check is skipped (unlimited scrolling - original behavior) + #### + if max_scroll_steps is not None and scroll_step_count >= max_scroll_steps: + break current_position = min(current_position + viewport_height, total_height) await self.safe_scroll(page, 0, current_position, delay=scroll_delay) + + # Increment the step counter for max_scroll_steps tracking + scroll_step_count += 1 + # await page.evaluate(f"window.scrollTo(0, {current_position})") # await asyncio.sleep(scroll_delay) diff --git a/tests/general/test_max_scroll.py b/tests/general/test_max_scroll.py new file mode 100644 index 00000000..1cf8908c --- /dev/null +++ b/tests/general/test_max_scroll.py @@ -0,0 +1,115 @@ +""" +Sample script to test the max_scroll_steps parameter implementation +""" +import asyncio +import 
os +import sys + +# Get the grandparent directory +grandparent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +sys.path.append(grandparent_dir) +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + + + +from crawl4ai import AsyncWebCrawler +from crawl4ai.async_configs import CrawlerRunConfig + +async def test_max_scroll_steps(): + """ + Test the max_scroll_steps parameter with different configurations + """ + print("šŸš€ Testing max_scroll_steps parameter implementation") + print("=" * 60) + + async with AsyncWebCrawler(verbose=True) as crawler: + + # Test 1: Without max_scroll_steps (unlimited scrolling) + print("\\nšŸ“‹ Test 1: Unlimited scrolling (max_scroll_steps=None)") + config1 = CrawlerRunConfig( + scan_full_page=True, + scroll_delay=0.1, + max_scroll_steps=None, # Default behavior + verbose=True + ) + + print(f"Config: scan_full_page={config1.scan_full_page}, max_scroll_steps={config1.max_scroll_steps}") + + try: + result1 = await crawler.arun( + url="https://example.com", # Simple page for testing + config=config1 + ) + print(f"āœ… Test 1 Success: Crawled {len(result1.markdown)} characters") + except Exception as e: + print(f"āŒ Test 1 Failed: {e}") + + # Test 2: With limited scroll steps + print("\\nšŸ“‹ Test 2: Limited scrolling (max_scroll_steps=3)") + config2 = CrawlerRunConfig( + scan_full_page=True, + scroll_delay=0.1, + max_scroll_steps=3, # Limit to 3 scroll steps + verbose=True + ) + + print(f"Config: scan_full_page={config2.scan_full_page}, max_scroll_steps={config2.max_scroll_steps}") + + try: + result2 = await crawler.arun( + url="https://techcrunch.com/", # Another test page + config=config2 + ) + print(f"āœ… Test 2 Success: Crawled {len(result2.markdown)} characters") + except Exception as e: + print(f"āŒ Test 2 Failed: {e}") + + # Test 3: Test serialization/deserialization + print("\\nšŸ“‹ Test 3: Configuration serialization test") + config3 = CrawlerRunConfig( + 
scan_full_page=True, + max_scroll_steps=5, + scroll_delay=0.2 + ) + + # Test to_dict + config_dict = config3.to_dict() + print(f"Serialized max_scroll_steps: {config_dict.get('max_scroll_steps')}") + + # Test from_kwargs + config4 = CrawlerRunConfig.from_kwargs({ + 'scan_full_page': True, + 'max_scroll_steps': 7, + 'scroll_delay': 0.3 + }) + print(f"Deserialized max_scroll_steps: {config4.max_scroll_steps}") + print("āœ… Test 3 Success: Serialization works correctly") + + # Test 4: Edge case - max_scroll_steps = 0 + print("\\nšŸ“‹ Test 4: Edge case (max_scroll_steps=0)") + config5 = CrawlerRunConfig( + scan_full_page=True, + max_scroll_steps=0, # Should not scroll at all + verbose=True + ) + + try: + result5 = await crawler.arun( + url="https://techcrunch.com/", + config=config5 + ) + print(f"āœ… Test 4 Success: No scrolling performed, crawled {len(result5.markdown)} characters") + except Exception as e: + print(f"āŒ Test 4 Failed: {e}") + + print("\\n" + "=" * 60) + print("šŸŽ‰ All tests completed!") + print("\\nThe max_scroll_steps parameter is working correctly:") + print("- None: Unlimited scrolling (default behavior)") + print("- Positive integer: Limits scroll steps to that number") + print("- 0: No scrolling performed") + print("- Properly serializes/deserializes in config") + +if __name__ == "__main__": + print("Starting max_scroll_steps test...") + asyncio.run(test_max_scroll_steps()) \ No newline at end of file From 74b06d4b8084454253fadff9cebd992168b7b010 Mon Sep 17 00:00:00 2001 From: AHMET YILMAZ Date: Thu, 5 Jun 2025 11:29:35 +0800 Subject: [PATCH 06/11] #1167 Add PHP MIME types to ContentTypeFilter for better file handling --- crawl4ai/deep_crawling/filters.py | 9 ++++ tests/deep_crwaling/test_filter.py | 75 ++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 tests/deep_crwaling/test_filter.py diff --git a/crawl4ai/deep_crawling/filters.py b/crawl4ai/deep_crawling/filters.py index 122be482..8d0bcc4d 100644 --- 
a/crawl4ai/deep_crawling/filters.py +++ b/crawl4ai/deep_crawling/filters.py @@ -337,6 +337,15 @@ class ContentTypeFilter(URLFilter): "sqlite": "application/vnd.sqlite3", # Placeholder "unknown": "application/octet-stream", # Fallback for unknown file types + # php + "php": "application/x-httpd-php", + "php3": "application/x-httpd-php", + "php4": "application/x-httpd-php", + "php5": "application/x-httpd-php", + "php7": "application/x-httpd-php", + "phtml": "application/x-httpd-php", + "phps": "application/x-httpd-php-source", + } @staticmethod diff --git a/tests/deep_crwaling/test_filter.py b/tests/deep_crwaling/test_filter.py new file mode 100644 index 00000000..29ada087 --- /dev/null +++ b/tests/deep_crwaling/test_filter.py @@ -0,0 +1,75 @@ +# // File: tests/deep_crawling/test_filters.py +import pytest +from urllib.parse import urlparse +from crawl4ai import ContentTypeFilter, URLFilter + +# Minimal URLFilter base class stub if not already importable directly for tests +# In a real scenario, this would be imported from the library +if not hasattr(URLFilter, '_update_stats'): # Check if it's a basic stub + class URLFilter: # Basic stub for testing if needed + def __init__(self, name=None): self.name = name + def apply(self, url: str) -> bool: raise NotImplementedError + def _update_stats(self, passed: bool): pass # Mock implementation + +# Assume ContentTypeFilter is structured as discussed. If its definition is not fully +# available for direct import in the test environment, a more elaborate stub or direct +# instantiation of the real class (if possible) would be needed. +# For this example, we assume ContentTypeFilter can be imported and used. 
+ +class TestContentTypeFilter: + @pytest.mark.parametrize( + "url, allowed_types, expected", + [ + # Existing tests (examples) + ("http://example.com/page.html", ["text/html"], True), + ("http://example.com/page.json", ["application/json"], True), + ("http://example.com/image.png", ["text/html"], False), + ("http://example.com/document.pdf", ["application/pdf"], True), + ("http://example.com/page", ["text/html"], True), # No extension, allowed + ("http://example.com/page", ["text/html"], False), # No extension, disallowed + ("http://example.com/page.unknown", ["text/html"], False), # Unknown extension + + # Tests for PHP extensions + ("http://example.com/index.php", ["application/x-httpd-php"], True), + ("http://example.com/script.php3", ["application/x-httpd-php"], True), + ("http://example.com/legacy.php4", ["application/x-httpd-php"], True), + ("http://example.com/main.php5", ["application/x-httpd-php"], True), + ("http://example.com/api.php7", ["application/x-httpd-php"], True), + ("http://example.com/index.phtml", ["application/x-httpd-php"], True), + ("http://example.com/source.phps", ["application/x-httpd-php-source"], True), + + # Test rejection of PHP extensions + ("http://example.com/index.php", ["text/html"], False), + ("http://example.com/script.php3", ["text/plain"], False), + ("http://example.com/source.phps", ["application/x-httpd-php"], False), # Mismatch MIME + ("http://example.com/source.php", ["application/x-httpd-php-source"], False), # Mismatch MIME for .php + + # Test case-insensitivity of extensions in URL + ("http://example.com/PAGE.HTML", ["text/html"], True), + ("http://example.com/INDEX.PHP", ["application/x-httpd-php"], True), + ("http://example.com/SOURCE.PHPS", ["application/x-httpd-php-source"], True), + + # Test case-insensitivity of allowed_types + ("http://example.com/index.php", ["APPLICATION/X-HTTPD-PHP"], True), + ], + ) + def test_apply(self, url, allowed_types, expected): + content_filter = ContentTypeFilter( + 
allowed_types=allowed_types + ) + assert content_filter.apply(url) == expected + + @pytest.mark.parametrize( + "url, expected_extension", + [ + ("http://example.com/file.html", "html"), + ("http://example.com/file.tar.gz", "gz"), + ("http://example.com/path/", ""), + ("http://example.com/nodot", ""), + ("http://example.com/.config", "config"), # hidden file with extension + ("http://example.com/path/to/archive.BIG.zip", "zip"), # Case test + ] + ) + def test_extract_extension(self, url, expected_extension): + # Test the static method directly + assert ContentTypeFilter._extract_extension(url) == expected_extension From 9442597f81059365abe6e4fd97845214f51ae525 Mon Sep 17 00:00:00 2001 From: AHMET YILMAZ Date: Tue, 10 Jun 2025 11:57:06 +0800 Subject: [PATCH 07/11] #1127: Improve URL handling and normalization in scraping strategies --- crawl4ai/content_scraping_strategy.py | 48 ++++++++++++-- crawl4ai/utils.py | 93 +++++++++++++++------------ 2 files changed, 96 insertions(+), 45 deletions(-) diff --git a/crawl4ai/content_scraping_strategy.py b/crawl4ai/content_scraping_strategy.py index 1dfbce84..8f6a7d83 100644 --- a/crawl4ai/content_scraping_strategy.py +++ b/crawl4ai/content_scraping_strategy.py @@ -15,7 +15,7 @@ from .config import ( ) from bs4 import NavigableString, Comment from bs4 import PageElement, Tag -from urllib.parse import urljoin +from urllib.parse import urljoin , urlparse from requests.exceptions import InvalidSchema from .utils import ( extract_metadata, @@ -24,8 +24,7 @@ from .utils import ( get_base_domain, extract_metadata_using_lxml, ) -from lxml import etree -from lxml import html as lhtml +from lxml import etree, html as lhtml from typing import List from .models import ScrapingResult, MediaItem, Link, Media, Links import copy @@ -130,7 +129,27 @@ class WebScrapingStrategy(ContentScrapingStrategy): ScrapingResult: A structured result containing the scraped content. 
""" actual_url = kwargs.get("redirected_url", url) - raw_result = self._scrap(actual_url, html, is_async=False, **kwargs) + # raw_result = self._scrap(actual_url, html, is_async=False, **kwargs) + effective_base_url = actual_url + try: + soup_for_base_check = BeautifulSoup(html, "html.parser") + base_tag = soup_for_base_check.find("base", href=True) + if base_tag: + base_href_val = base_tag.get("href") + if base_href_val is not None: + resolved_base_href = urljoin(actual_url, base_href_val) + parsed_resolved_base = urlparse(resolved_base_href) + if parsed_resolved_base.scheme and parsed_resolved_base.netloc: + effective_base_url = resolved_base_href + except Exception as e: + self._log( + "error", + message="Error resolving base URL: {error}", + tag="SCRAPE", + params={"error": str(e)}, + ) + kwargs_for_scrap = {**kwargs, '_effective_base_url_override': effective_base_url } + raw_result = self._scrap(actual_url, html, is_async=False, **kwargs_for_scrap) if raw_result is None: return ScrapingResult( cleaned_html="", @@ -1487,6 +1506,27 @@ class LXMLWebScrapingStrategy(WebScrapingStrategy): doc = lhtml.document_fromstring(html) # Match BeautifulSoup's behavior of using body or full doc # body = doc.xpath('//body')[0] if doc.xpath('//body') else doc + # Determine effective base URL considering + base_tag_element = doc.find(".//base[@href]") + if base_tag_element is not None: + base_href_value = base_tag_element.get("href") + if base_href_value is not None: + resolved_base_href = urljoin(url, base_href_value) + parse_resolved_base_href = urlparse(resolved_base_href) + if parse_resolved_base_href.scheme and parse_resolved_base_href.netloc: + effective_base_url = resolved_base_href + self._log( + "debug", + f"Using , resolved effective base URL for links: {effective_base_url}", + url=url, # Log against original document URL + tag="SCRAPE_BASE_URL") + else: + effective_base_url = url + self._log( + "warning", + f" resolved to non-absolute URL '{resolved_base_href}'. 
Using document URL '{actual_url}' as base.", + url=url, # Log against original document URL + tag="SCRAPE_BASE_URL") body = doc base_domain = get_base_domain(url) diff --git a/crawl4ai/utils.py b/crawl4ai/utils.py index d8b366d9..c51fa254 100644 --- a/crawl4ai/utils.py +++ b/crawl4ai/utils.py @@ -15,9 +15,10 @@ from .html2text import html2text, CustomHTML2Text from .config import MIN_WORD_THRESHOLD, IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD, IMAGE_SCORE_THRESHOLD, DEFAULT_PROVIDER, PROVIDER_MODELS import httpx from socket import gaierror -from pathlib import Path +from pathlib import Path , PurePath from typing import Dict, Any, List, Optional, Callable from urllib.parse import urljoin + import requests from requests.exceptions import InvalidSchema import xxhash @@ -2056,18 +2057,29 @@ def fast_format_html(html_string): def normalize_url(href, base_url): """Normalize URLs to ensure consistent format""" from urllib.parse import urljoin, urlparse - + if href is None: + return None + + href_str = str(href).strip() + if not href_str: + # Empty href, conventionally resolves to the base URL itself. 
+ return base_url # Parse base URL to get components + parsed_href = urlparse(href_str) + if parsed_href.scheme and parsed_href.scheme.lower() in ["mailto", "tel", "javascript", "data", "file"]: + # If href is already a full URL, return it as is + return href_str + parsed_base = urlparse(base_url) if not parsed_base.scheme or not parsed_base.netloc: raise ValueError(f"Invalid base URL format: {base_url}") - # Ensure base_url ends with a trailing slash if it's a directory path - if not base_url.endswith('/'): - base_url = base_url + '/' + # # Ensure base_url ends with a trailing slash if it's a directory path + # if not base_url.endswith('/'): + # base_url = base_url + '/' # Use urljoin to handle all cases - normalized = urljoin(base_url, href.strip()) + normalized = urljoin(base_url, href_str) return normalized @@ -2080,7 +2092,7 @@ def normalize_url_for_deep_crawl(href, base_url): return None # Use urljoin to handle relative URLs - full_url = urljoin(base_url, href.strip()) + full_url = urljoin(base_url, str(href).strip()) # Parse the URL for normalization parsed = urlparse(full_url) @@ -2110,7 +2122,7 @@ def normalize_url_for_deep_crawl(href, base_url): normalized = urlunparse(( parsed.scheme, netloc, - parsed.path.rstrip('/'), # Normalize trailing slash + str(PurePath(parsed.path)).rstrip('/'), # Normalize path to remove duplicate slashes parsed.params, query, fragment @@ -2127,7 +2139,7 @@ def efficient_normalize_url_for_deep_crawl(href, base_url): return None # Resolve relative URLs - full_url = urljoin(base_url, href.strip()) + full_url = urljoin(base_url, str(href).strip()) # Use proper URL parsing parsed = urlparse(full_url) @@ -2135,52 +2147,51 @@ def efficient_normalize_url_for_deep_crawl(href, base_url): # Only perform the most critical normalizations # 1. Lowercase hostname # 2. 
Remove fragment + path = parsed.path + if len(path) > 1 and path.endswith('/'): + path = path.rstrip('/') normalized = urlunparse(( parsed.scheme, parsed.netloc.lower(), - parsed.path.rstrip('/'), - parsed.params, - parsed.query, - '' # Remove fragment )) return normalized -def normalize_url_tmp(href, base_url): - """Normalize URLs to ensure consistent format""" - # Extract protocol and domain from base URL - try: - base_parts = base_url.split("/") - protocol = base_parts[0] - domain = base_parts[2] - except IndexError: - raise ValueError(f"Invalid base URL format: {base_url}") +# def normalize_url_tmp(href, base_url): +# """Normalize URLs to ensure consistent format""" +# # Extract protocol and domain from base URL +# try: +# base_parts = base_url.split("/") +# protocol = base_parts[0] +# domain = base_parts[2] +# except IndexError: +# raise ValueError(f"Invalid base URL format: {base_url}") - # Handle special protocols - special_protocols = {"mailto:", "tel:", "ftp:", "file:", "data:", "javascript:"} - if any(href.lower().startswith(proto) for proto in special_protocols): - return href.strip() +# # Handle special protocols +# special_protocols = {"mailto:", "tel:", "ftp:", "file:", "data:", "javascript:"} +# if any(href.lower().startswith(proto) for proto in special_protocols): +# return href.strip() - # Handle anchor links - if href.startswith("#"): - return f"{base_url}{href}" +# # Handle anchor links +# if href.startswith("#"): +# return f"{base_url}{href}" - # Handle protocol-relative URLs - if href.startswith("//"): - return f"{protocol}{href}" +# # Handle protocol-relative URLs +# if href.startswith("//"): +# return f"{protocol}{href}" - # Handle root-relative URLs - if href.startswith("/"): - return f"{protocol}//{domain}{href}" +# # Handle root-relative URLs +# if href.startswith("/"): +# return f"{protocol}//{domain}{href}" - # Handle relative URLs - if not href.startswith(("http://", "https://")): - # Remove leading './' if present - href = 
href.lstrip("./") - return f"{protocol}//{domain}/{href}" +# # Handle relative URLs +# if not href.startswith(("http://", "https://")): +# # Remove leading './' if present +# href = href.lstrip("./") +# return f"{protocol}//{domain}/{href}" - return href.strip() +# return href.strip() def get_base_domain(url: str) -> str: From 4679ee023d06f94ecad73a963ab23f9f0d08da14 Mon Sep 17 00:00:00 2001 From: ntohidi Date: Tue, 10 Jun 2025 11:19:18 +0200 Subject: [PATCH 08/11] fix: Enhance URLPatternFilter to enforce path boundary checks for prefix matching. ref #1003 --- crawl4ai/deep_crawling/filters.py | 19 +++++-- tests/general/test_url_pattern.py | 85 +++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 tests/general/test_url_pattern.py diff --git a/crawl4ai/deep_crawling/filters.py b/crawl4ai/deep_crawling/filters.py index 8d0bcc4d..b65112e2 100644 --- a/crawl4ai/deep_crawling/filters.py +++ b/crawl4ai/deep_crawling/filters.py @@ -227,10 +227,21 @@ class URLPatternFilter(URLFilter): # Prefix check (/foo/*) if self._simple_prefixes: path = url.split("?")[0] - if any(path.startswith(p) for p in self._simple_prefixes): - result = True - self._update_stats(result) - return not result if self._reverse else result + # if any(path.startswith(p) for p in self._simple_prefixes): + # result = True + # self._update_stats(result) + # return not result if self._reverse else result + #### + # Modified the prefix matching logic to ensure path boundary checking: + # - Check if the matched prefix is followed by a path separator (`/`), query parameter (`?`), fragment (`#`), or is at the end of the path + # - This ensures `/api/` only matches complete path segments, not substrings like `/apiv2/` + #### + for prefix in self._simple_prefixes: + if path.startswith(prefix): + if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']: + result = True + self._update_stats(result) + return not result if self._reverse else result # Complex 
patterns if self._path_patterns: diff --git a/tests/general/test_url_pattern.py b/tests/general/test_url_pattern.py new file mode 100644 index 00000000..3aea14d9 --- /dev/null +++ b/tests/general/test_url_pattern.py @@ -0,0 +1,85 @@ +import sys +import os + +# Get the grandparent directory +grandparent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +sys.path.append(grandparent_dir) +__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) + +import asyncio +from crawl4ai.deep_crawling.filters import URLPatternFilter + + +def test_prefix_boundary_matching(): + """Test that prefix patterns respect path boundaries""" + print("=== Testing URLPatternFilter Prefix Boundary Fix ===") + + filter_obj = URLPatternFilter(patterns=['https://langchain-ai.github.io/langgraph/*']) + + test_cases = [ + ('https://langchain-ai.github.io/langgraph/', True), + ('https://langchain-ai.github.io/langgraph/concepts/', True), + ('https://langchain-ai.github.io/langgraph/tutorials/', True), + ('https://langchain-ai.github.io/langgraph?param=1', True), + ('https://langchain-ai.github.io/langgraph#section', True), + ('https://langchain-ai.github.io/langgraphjs/', False), + ('https://langchain-ai.github.io/langgraphjs/concepts/', False), + ('https://other-site.com/langgraph/', False), + ] + + all_passed = True + for url, expected in test_cases: + result = filter_obj.apply(url) + status = "PASS" if result == expected else "FAIL" + if result != expected: + all_passed = False + print(f"{status:4} | Expected: {expected:5} | Got: {result:5} | {url}") + + return all_passed + + +def test_edge_cases(): + """Test edge cases for path boundary matching""" + print("\n=== Testing Edge Cases ===") + + test_patterns = [ + ('/api/*', [ + ('/api/', True), + ('/api/v1', True), + ('/api?param=1', True), + ('/apiv2/', False), + ('/api_old/', False), + ]), + + ('*/docs/*', [ + ('example.com/docs/', True), + ('example.com/docs/guide', True), + 
('example.com/documentation/', False), + ('example.com/docs_old/', False), + ]), + ] + + all_passed = True + for pattern, test_cases in test_patterns: + print(f"\nPattern: {pattern}") + filter_obj = URLPatternFilter(patterns=[pattern]) + + for url, expected in test_cases: + result = filter_obj.apply(url) + status = "PASS" if result == expected else "FAIL" + if result != expected: + all_passed = False + print(f" {status:4} | Expected: {expected:5} | Got: {result:5} | {url}") + + return all_passed + +if __name__ == "__main__": + test1_passed = test_prefix_boundary_matching() + test2_passed = test_edge_cases() + + if test1_passed and test2_passed: + print("\nāœ… All tests passed!") + sys.exit(0) + else: + print("\nāŒ Some tests failed!") + sys.exit(1) From 5d9213a0e9e7686c394385ed50f586b90e0dd6a8 Mon Sep 17 00:00:00 2001 From: ntohidi Date: Thu, 12 Jun 2025 12:21:40 +0200 Subject: [PATCH 09/11] fix: Update JavaScript execution in AsyncPlaywrightCrawlerStrategy to handle script errors and add basic download test case. 
ref #1215 --- crawl4ai/async_crawler_strategy.py | 23 +++++++++++++++++-- tests/general/test_download_file.py | 34 +++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 tests/general/test_download_file.py diff --git a/crawl4ai/async_crawler_strategy.py b/crawl4ai/async_crawler_strategy.py index d349388f..c0bf6ec5 100644 --- a/crawl4ai/async_crawler_strategy.py +++ b/crawl4ai/async_crawler_strategy.py @@ -1596,12 +1596,31 @@ class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): # then wait for the new page to load before continuing result = None try: + # OLD VERSION: + # result = await page.evaluate( + # f""" + # (async () => {{ + # try {{ + # const script_result = {script}; + # return {{ success: true, result: script_result }}; + # }} catch (err) {{ + # return {{ success: false, error: err.toString(), stack: err.stack }}; + # }} + # }})(); + # """ + # ) + + # """ NEW VERSION: + # When {script} contains statements (e.g., const link = …; link.click();), + # this forms invalid JavaScript, causing Playwright execution error: SyntaxError: Unexpected token 'const'. 
+ # """ result = await page.evaluate( f""" (async () => {{ try {{ - const script_result = {script}; - return {{ success: true, result: script_result }}; + return await (async () => {{ + {script} + }})(); }} catch (err) {{ return {{ success: false, error: err.toString(), stack: err.stack }}; }} diff --git a/tests/general/test_download_file.py b/tests/general/test_download_file.py new file mode 100644 index 00000000..ca552779 --- /dev/null +++ b/tests/general/test_download_file.py @@ -0,0 +1,34 @@ +import asyncio +from crawl4ai import CrawlerRunConfig, AsyncWebCrawler, BrowserConfig +from pathlib import Path +import os + +async def test_basic_download(): + + # Custom folder (otherwise defaults to ~/.crawl4ai/downloads) + downloads_path = os.path.join(Path.home(), ".crawl4ai", "downloads") + os.makedirs(downloads_path, exist_ok=True) + browser_config = BrowserConfig( + accept_downloads=True, + downloads_path=downloads_path + ) + async with AsyncWebCrawler(config=browser_config) as crawler: + run_config = CrawlerRunConfig( + js_code=""" + const link = document.querySelector('a[href$=".exe"]'); + if (link) { link.click(); } + """, + delay_before_return_html=5 + ) + result = await crawler.arun("https://www.python.org/downloads/", config=run_config) + + if result.downloaded_files: + print("Downloaded files:") + for file_path in result.downloaded_files: + print("•", file_path) + else: + print("No files downloaded.") + +if __name__ == "__main__": + asyncio.run(test_basic_download()) + \ No newline at end of file From dc8548118038f99c1c3b5c159a6d2ec71e1fbbcf Mon Sep 17 00:00:00 2001 From: ntohidi Date: Thu, 12 Jun 2025 12:23:03 +0200 Subject: [PATCH 10/11] refactor: Update LLM extraction example with the updated structure --- .../examples/llm_extraction_openai_pricing.py | 72 +++++++++++-------- 1 file changed, 42 insertions(+), 30 deletions(-) diff --git a/docs/examples/llm_extraction_openai_pricing.py b/docs/examples/llm_extraction_openai_pricing.py index 
27a1c310..de9c1c4a 100644 --- a/docs/examples/llm_extraction_openai_pricing.py +++ b/docs/examples/llm_extraction_openai_pricing.py @@ -1,43 +1,55 @@ -from crawl4ai import LLMConfig -from crawl4ai import AsyncWebCrawler, LLMExtractionStrategy import asyncio -import os -import json from pydantic import BaseModel, Field - -url = "https://openai.com/api/pricing/" +from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, LLMConfig, BrowserConfig, CacheMode +from crawl4ai.extraction_strategy import LLMExtractionStrategy +from typing import Dict +import os class OpenAIModelFee(BaseModel): model_name: str = Field(..., description="Name of the OpenAI model.") input_fee: str = Field(..., description="Fee for input token for the OpenAI model.") - output_fee: str = Field( - ..., description="Fee for output token for the OpenAI model." + output_fee: str = Field(..., description="Fee for output token for the OpenAI model.") + + +async def extract_structured_data_using_llm(provider: str, api_token: str = None, extra_headers: Dict[str, str] = None): + print(f"\n--- Extracting Structured Data with {provider} ---") + + if api_token is None and provider != "ollama": + print(f"API token is required for {provider}. Skipping this example.") + return + + browser_config = BrowserConfig(headless=True) + + extra_args = {"temperature": 0, "top_p": 0.9, "max_tokens": 2000} + if extra_headers: + extra_args["extra_headers"] = extra_headers + + crawler_config = CrawlerRunConfig( + cache_mode=CacheMode.BYPASS, + word_count_threshold=1, + page_timeout=80000, + extraction_strategy=LLMExtractionStrategy( + llm_config=LLMConfig(provider=provider, api_token=api_token), + schema=OpenAIModelFee.model_json_schema(), + extraction_type="schema", + instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens. 
+ Do not miss any models in the entire content.""", + extra_args=extra_args, + ), ) -async def main(): - # Use AsyncWebCrawler - async with AsyncWebCrawler() as crawler: + async with AsyncWebCrawler(config=browser_config) as crawler: result = await crawler.arun( - url=url, - word_count_threshold=1, - extraction_strategy=LLMExtractionStrategy( - # provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'), - llm_config=LLMConfig(provider="groq/llama-3.1-70b-versatile", api_token=os.getenv("GROQ_API_KEY")), - schema=OpenAIModelFee.model_json_schema(), - extraction_type="schema", - instruction="From the crawled content, extract all mentioned model names along with their " - "fees for input and output tokens. Make sure not to miss anything in the entire content. " - "One extracted model JSON format should look like this: " - '{ "model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens" }', - ), + url="https://openai.com/api/pricing/", + config=crawler_config ) - print("Success:", result.success) - model_fees = json.loads(result.extracted_content) - print(len(model_fees)) - - with open(".data/data.json", "w", encoding="utf-8") as f: - f.write(result.extracted_content) + print(result.extracted_content) -asyncio.run(main()) +if __name__ == "__main__": + asyncio.run( + extract_structured_data_using_llm( + provider="openai/gpt-4o", api_token=os.getenv("OPENAI_API_KEY") + ) + ) From 93323264575aa6c2ffaa518b56e5adb50353b0c3 Mon Sep 17 00:00:00 2001 From: AHMET YILMAZ Date: Mon, 16 Jun 2025 18:18:32 +0800 Subject: [PATCH 11/11] feat: Add PDF parsing documentation and navigation entry --- docs/md_v2/advanced/pdf-parsing.md | 204 +++++++++++++++++++++++++++++ mkdocs.yml | 1 + 2 files changed, 205 insertions(+) create mode 100644 docs/md_v2/advanced/pdf-parsing.md diff --git a/docs/md_v2/advanced/pdf-parsing.md b/docs/md_v2/advanced/pdf-parsing.md new file mode 100644 index 00000000..bf91fa5b --- /dev/null +++ 
b/docs/md_v2/advanced/pdf-parsing.md
@@ -0,0 +1,204 @@
+This page documents `PDFCrawlerStrategy` and `PDFContentScrapingStrategy`, the Crawl4AI strategies for crawling and scraping PDF documents.
+
+
+# PDF Processing Strategies
+
+Crawl4AI provides specialized strategies for handling and extracting content from PDF files. These strategies allow you to seamlessly integrate PDF processing into your crawling workflows, whether the PDFs are hosted online or stored locally.
+
+## `PDFCrawlerStrategy`
+
+### Overview
+`PDFCrawlerStrategy` is an implementation of `AsyncCrawlerStrategy` designed specifically for PDF documents. Instead of interpreting the input URL as an HTML webpage, this strategy treats it as a pointer to a PDF file. It doesn't perform deep crawling or HTML parsing itself but rather prepares the PDF source for a dedicated PDF scraping strategy. Its primary role is to identify the PDF source (web URL or local file) and pass it along the processing pipeline in a way that `AsyncWebCrawler` can handle.
+
+### When to Use
+Use `PDFCrawlerStrategy` when you need to:
+- Process PDF files using the `AsyncWebCrawler`.
+- Handle PDFs from both web URLs (e.g., `https://example.com/document.pdf`) and local file paths (e.g., `file:///path/to/your/document.pdf`).
+- Integrate PDF content extraction into a unified `CrawlResult` object, allowing consistent handling of PDF data alongside web page data.
+
+### Key Methods and Their Behavior
+- **`__init__(self, logger: AsyncLogger = None)`**:
+  - Initializes the strategy.
+  - `logger`: An optional `AsyncLogger` instance (from `crawl4ai.async_logger`) for logging purposes.
+- **`async crawl(self, url: str, **kwargs) -> AsyncCrawlResponse`**:
+  - This method is called by the `AsyncWebCrawler` during the `arun` process.
+  - It takes the `url` (which should point to a PDF) and creates a minimal `AsyncCrawlResponse`.
+ - The `html` attribute of this response is typically empty or a placeholder, as the actual PDF content processing is deferred to the `PDFContentScrapingStrategy` (or a similar PDF-aware scraping strategy). + - It sets `response_headers` to indicate "application/pdf" and `status_code` to 200. +- **`async close(self)`**: + - A method for cleaning up any resources used by the strategy. For `PDFCrawlerStrategy`, this is usually minimal. +- **`async __aenter__(self)` / `async __aexit__(self, exc_type, exc_val, exc_tb)`**: + - Enables asynchronous context management for the strategy, allowing it to be used with `async with`. + +### Example Usage +```python +import asyncio +from crawl4ai import AsyncWebCrawler, CrawlerRunConfig +from crawl4ai.processors.pdf import PDFCrawlerStrategy, PDFContentScrapingStrategy + +async def main(): + # Initialize the PDF crawler strategy + pdf_crawler_strategy = PDFCrawlerStrategy() + + # PDFCrawlerStrategy is typically used in conjunction with PDFContentScrapingStrategy + # The scraping strategy handles the actual PDF content extraction + pdf_scraping_strategy = PDFContentScrapingStrategy() + run_config = CrawlerRunConfig(scraping_strategy=pdf_scraping_strategy) + + async with AsyncWebCrawler(crawler_strategy=pdf_crawler_strategy) as crawler: + # Example with a remote PDF URL + pdf_url = "https://arxiv.org/pdf/2310.06825.pdf" # A public PDF from arXiv + + print(f"Attempting to process PDF: {pdf_url}") + result = await crawler.arun(url=pdf_url, config=run_config) + + if result.success: + print(f"Successfully processed PDF: {result.url}") + print(f"Metadata Title: {result.metadata.get('title', 'N/A')}") + # Further processing of result.markdown, result.media, etc. + # would be done here, based on what PDFContentScrapingStrategy extracts. 
+ if result.markdown and hasattr(result.markdown, 'raw_markdown'): + print(f"Extracted text (first 200 chars): {result.markdown.raw_markdown[:200]}...") + else: + print("No markdown (text) content extracted.") + else: + print(f"Failed to process PDF: {result.error_message}") + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Pros and Cons +**Pros:** +- Enables `AsyncWebCrawler` to handle PDF sources directly using familiar `arun` calls. +- Provides a consistent interface for specifying PDF sources (URLs or local paths). +- Abstracts the source handling, allowing a separate scraping strategy to focus on PDF content parsing. + +**Cons:** +- Does not perform any PDF data extraction itself; it strictly relies on a compatible scraping strategy (like `PDFContentScrapingStrategy`) to process the PDF. +- Has limited utility on its own; most of its value comes from being paired with a PDF-specific content scraping strategy. + +--- + +## `PDFContentScrapingStrategy` + +### Overview +`PDFContentScrapingStrategy` is an implementation of `ContentScrapingStrategy` designed to extract text, metadata, and optionally images from PDF documents. It is intended to be used in conjunction with a crawler strategy that can provide it with a PDF source, such as `PDFCrawlerStrategy`. This strategy uses the `NaivePDFProcessorStrategy` internally to perform the low-level PDF parsing. + +### When to Use +Use `PDFContentScrapingStrategy` when your `AsyncWebCrawler` (often configured with `PDFCrawlerStrategy`) needs to: +- Extract textual content page by page from a PDF document. +- Retrieve standard metadata embedded within the PDF (e.g., title, author, subject, creation date, page count). +- Optionally, extract images contained within the PDF pages. These images can be saved to a local directory or made available for further processing. 
+- Produce a `ScrapingResult` that can be converted into a `CrawlResult`, making PDF content accessible in a manner similar to HTML web content (e.g., text in `result.markdown`, metadata in `result.metadata`). + +### Key Configuration Attributes +When initializing `PDFContentScrapingStrategy`, you can configure its behavior using the following attributes: +- **`extract_images: bool = False`**: If `True`, the strategy will attempt to extract images from the PDF. +- **`save_images_locally: bool = False`**: If `True` (and `extract_images` is also `True`), extracted images will be saved to disk in the `image_save_dir`. If `False`, image data might be available in another form (e.g., base64, depending on the underlying processor) but not saved as separate files by this strategy. +- **`image_save_dir: str = None`**: Specifies the directory where extracted images should be saved if `save_images_locally` is `True`. If `None`, a default or temporary directory might be used. +- **`batch_size: int = 4`**: Defines how many PDF pages are processed in a single batch. This can be useful for managing memory when dealing with very large PDF documents. +- **`logger: AsyncLogger = None`**: An optional `AsyncLogger` instance for logging. + +### Key Methods and Their Behavior +- **`__init__(self, save_images_locally: bool = False, extract_images: bool = False, image_save_dir: str = None, batch_size: int = 4, logger: AsyncLogger = None)`**: + - Initializes the strategy with configurations for image handling, batch processing, and logging. It sets up an internal `NaivePDFProcessorStrategy` instance which performs the actual PDF parsing. +- **`scrap(self, url: str, html: str, **params) -> ScrapingResult`**: + - This is the primary synchronous method called by the crawler (via `ascrap`) to process the PDF. + - `url`: The path or URL to the PDF file (provided by `PDFCrawlerStrategy` or similar). 
+ - `html`: Typically an empty string when used with `PDFCrawlerStrategy`, as the content is a PDF, not HTML. + - It first ensures the PDF is accessible locally (downloads it to a temporary file if `url` is remote). + - It then uses its internal PDF processor to extract text, metadata, and images (if configured). + - The extracted information is compiled into a `ScrapingResult` object: + - `cleaned_html`: Contains an HTML-like representation of the PDF, where each page's content is often wrapped in a `
<div>` with page number information.
+    - `media`: A dictionary where `media["images"]` will contain information about extracted images if `extract_images` was `True`.
+    - `links`: A dictionary where `links["urls"]` can contain URLs found within the PDF content.
+    - `metadata`: A dictionary holding PDF metadata (e.g., title, author, num_pages).
+- **`async ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult`**:
+  - The asynchronous version of `scrap`. Under the hood, it typically runs the synchronous `scrap` method in a separate thread using `asyncio.to_thread` to avoid blocking the event loop.
+- **`_get_pdf_path(self, url: str) -> str`**:
+  - A private helper method to manage PDF file access. If the `url` is remote (http/https), it downloads the PDF to a temporary local file and returns its path. If `url` indicates a local file (`file://` or a direct path), it resolves and returns the local path.
+
+### Example Usage
+```python
+import asyncio
+from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
+from crawl4ai.processors.pdf import PDFCrawlerStrategy, PDFContentScrapingStrategy
+import os # For creating image directory
+
+async def main():
+    # Define the directory for saving extracted images
+    image_output_dir = "./my_pdf_images"
+    os.makedirs(image_output_dir, exist_ok=True)
+
+    # Configure the PDF content scraping strategy
+    # Enable image extraction and specify where to save them
+    pdf_scraping_cfg = PDFContentScrapingStrategy(
+        extract_images=True,
+        save_images_locally=True,
+        image_save_dir=image_output_dir,
+        batch_size=2 # Process 2 pages at a time for demonstration
+    )
+
+    # The PDFCrawlerStrategy is needed to tell AsyncWebCrawler how to "crawl" a PDF
+    pdf_crawler_cfg = PDFCrawlerStrategy()
+
+    # Configure the overall crawl run
+    run_cfg = CrawlerRunConfig(
+        scraping_strategy=pdf_scraping_cfg # Use our PDF scraping strategy
+    )
+
+    # Initialize the crawler with the PDF-specific crawler strategy
+    async with 
AsyncWebCrawler(crawler_strategy=pdf_crawler_cfg) as crawler: + pdf_url = "https://arxiv.org/pdf/2310.06825.pdf" # Example PDF + + print(f"Starting PDF processing for: {pdf_url}") + result = await crawler.arun(url=pdf_url, config=run_cfg) + + if result.success: + print("\n--- PDF Processing Successful ---") + print(f"Processed URL: {result.url}") + + print("\n--- Metadata ---") + for key, value in result.metadata.items(): + print(f" {key.replace('_', ' ').title()}: {value}") + + if result.markdown and hasattr(result.markdown, 'raw_markdown'): + print(f"\n--- Extracted Text (Markdown Snippet) ---") + print(result.markdown.raw_markdown[:500].strip() + "...") + else: + print("\nNo text (markdown) content extracted.") + + if result.media and result.media.get("images"): + print(f"\n--- Image Extraction ---") + print(f"Extracted {len(result.media['images'])} image(s).") + for i, img_info in enumerate(result.media["images"][:2]): # Show info for first 2 images + print(f" Image {i+1}:") + print(f" Page: {img_info.get('page')}") + print(f" Format: {img_info.get('format', 'N/A')}") + if img_info.get('path'): + print(f" Saved at: {img_info.get('path')}") + else: + print("\nNo images were extracted (or extract_images was False).") + else: + print(f"\n--- PDF Processing Failed ---") + print(f"Error: {result.error_message}") + +if __name__ == "__main__": + asyncio.run(main()) +``` + +### Pros and Cons + +**Pros:** +- Provides a comprehensive way to extract text, metadata, and (optionally) images from PDF documents. +- Handles both remote PDFs (via URL) and local PDF files. +- Configurable image extraction allows saving images to disk or accessing their data. +- Integrates smoothly with the `CrawlResult` object structure, making PDF-derived data accessible in a way consistent with web-scraped data. +- The `batch_size` parameter can help in managing memory consumption when processing large or numerous PDF pages. 
+ +**Cons:** +- Extraction quality and performance can vary significantly depending on the PDF's complexity, encoding, and whether it's image-based (scanned) or text-based. +- Image extraction can be resource-intensive (both CPU and disk space if `save_images_locally` is true). +- Relies on `NaivePDFProcessorStrategy` internally, which might have limitations with very complex layouts, encrypted PDFs, or forms compared to more sophisticated PDF parsing libraries. Scanned PDFs will not yield text unless an OCR step is performed (which is not part of this strategy by default). +- Link extraction from PDFs can be basic and depends on how hyperlinks are embedded in the document. diff --git a/mkdocs.yml b/mkdocs.yml index 38b19afe..72e09397 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -43,6 +43,7 @@ nav: - "Identity Based Crawling": "advanced/identity-based-crawling.md" - "SSL Certificate": "advanced/ssl-certificate.md" - "Network & Console Capture": "advanced/network-console-capture.md" + - "PDF Parsing": "advanced/pdf-parsing.md" - Extraction: - "LLM-Free Strategies": "extraction/no-llm-strategies.md" - "LLM Strategies": "extraction/llm-strategies.md"