diff --git a/.gitignore b/.gitignore index d8a63c25..fd1fd196 100644 --- a/.gitignore +++ b/.gitignore @@ -171,4 +171,4 @@ test_pad*.py Crawl4AI.egg-info/ requirements0.txt -a.txt \ No newline at end of file +a.txt diff --git a/README.md b/README.md index d9297e5e..cce04a99 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,9 @@ [![GitHub Pull Requests](https://img.shields.io/github/issues-pr/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/pulls) [![License](https://img.shields.io/github/license/unclecode/crawl4ai)](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) -Crawl4AI is a powerful, free web crawling service designed to extract useful information from web pages and make it accessible for large language models (LLMs) and AI applications. πŸ†“πŸŒ +Crawl4AI has one clear task: to simplify crawling and extract useful information from web pages, making it accessible for large language models (LLMs) and AI applications. πŸ†“πŸŒ +<<<<<<< HEAD ## πŸš€ New Changes Will be Released Soon - πŸš€ 10x faster!! @@ -23,8 +24,104 @@ Crawl4AI is a powerful, free web crawling service designed to extract useful inf - πŸ“· Image Captioning: Incorporating image captioning capabilities to extract descriptions from images. - πŸ’Ύ Embedding Vector Data: Generate and store embedding data for each crawled website. - πŸ” Semantic Search Engine: Building a semantic search engine that fetches content, performs vector search similarity, and generates labeled chunk data based on user queries and URLs. +======= +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz8u30rvbq6Scodye9AGCw8Qg_Z8QGsk) + +## Recent Changes + +- πŸš€ 10x faster!! +- πŸ“œ Execute custom JavaScript before crawling! +- 🀝 Colab friendly! +- πŸ“š Chunking strategies: topic-based, regex, sentence, and more! +- 🧠 Extraction strategies: cosine clustering, LLM, and more! +- 🎯 CSS selector support +- πŸ“ Pass instructions/keywords to refine extraction + +## Power and Simplicity of Crawl4AI πŸš€ + +To show the simplicity take a look at the first example: + +```python +from crawl4ai import WebCrawler + +# Create the WebCrawler instance +crawler = WebCrawler() + +# Run the crawler with keyword filtering and CSS selector +result = crawler.run(url="https://www.nbcnews.com/business") +print(result) # {url, html, markdown, extracted_content, metadata} +``` + +Now let's try a complex task. Below is an example of how you can execute JavaScript, filter data using keywords, and use a CSS selector to extract specific contentβ€”all in one go! + +1. Instantiate a WebCrawler object. +2. Execute custom JavaScript to click a "Load More" button. +3. Extract semantical chunks of content and filter the data to include only content related to technology. +4. Use a CSS selector to extract only paragraphs (`
<p>
` tags). + +```python +# Import necessary modules +from crawl4ai import WebCrawler +from crawl4ai.chunking_strategy import * +from crawl4ai.extraction_strategy import * +from crawl4ai.crawler_strategy import * + +# Define the JavaScript code to click the "Load More" button +js_code = """ +const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); +loadMoreButton && loadMoreButton.click(); +""" + +# Define the crawling strategy +crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code) + +# Create the WebCrawler instance with the defined strategy +crawler = WebCrawler(crawler_strategy=crawler_strategy) + +# Run the crawler with keyword filtering and CSS selector +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=CosineStrategy( + semantic_filter="technology", + ), +) + +# Run the crawler with LLM extraction strategy +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy( + provider="openai/gpt-4o", + api_token=os.getenv('OPENAI_API_KEY'), + instruction="Extract only content related to technology" + ), + css_selector="p" +) + +# Display the extracted result +print(result) +``` + +With Crawl4AI, you can perform advanced web crawling and data extraction tasks with just a few lines of code. This example demonstrates how you can harness the power of Crawl4AI to simplify your workflow and get the data you need efficiently. + +--- + +*Continue reading to learn more about the features, installation process, usage, and more.* + + +## Table of Contents + +1. [Features](#features-) +2. [Installation](#installation-) +3. [REST API/Local Server](#using-the-local-server-ot-rest-api-) +4. [Python Library Usage](#python-library-usage-) +5. [Parameters](#parameters-) +6. [Chunking Strategies](#chunking-strategies-) +7. [Extraction Strategies](#extraction-strategies-) +8. [Contributing](#contributing-) +9. [License](#license-) +10. [Contact](#contact-) +>>>>>>> new-release-0.0.2-no-spacy -For more details, refer to the [CHANGELOG.md](https://github.com/unclecode/crawl4ai/edit/main/CHANGELOG.md) file. ## Features ✨ @@ -33,223 +130,372 @@ For more details, refer to the [CHANGELOG.md](https://github.com/unclecode/crawl - 🌍 Supports crawling multiple URLs simultaneously - πŸŒƒ Replace media tags with ALT. - πŸ†“ Completely free to use and open-source - -## Getting Started πŸš€ - -To get started with Crawl4AI, simply visit our web application at [https://crawl4ai.uccode.io](https://crawl4ai.uccode.io) (Available now!) and enter the URL(s) you want to crawl. The application will process the URLs and provide you with the extracted data in various formats. +- πŸ“œ Execute custom JavaScript before crawling +- πŸ“š Chunking strategies: topic-based, regex, sentence, and more +- 🧠 Extraction strategies: cosine clustering, LLM, and more +- 🎯 CSS selector support +- πŸ“ Pass instructions/keywords to refine extraction ## Installation πŸ’» -There are two ways to use Crawl4AI: as a library in your Python projects or as a standalone local server. - -### Using Crawl4AI as a Library πŸ“š +There are three ways to use Crawl4AI: +1. As a library (Recommended) +2. As a local server (Docker) or using the REST API +4. As a Google Colab notebook. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz8u30rvbq6Scodye9AGCw8Qg_Z8QGsk) To install Crawl4AI as a library, follow these steps: 1. 
Install the package from GitHub: -```sh -pip install git+https://github.com/unclecode/crawl4ai.git +```bash +virtualenv venv +source venv/bin/activate +pip install "crawl4ai[all] @ git+https://github.com/unclecode/crawl4ai.git" ``` -Alternatively, you can clone the repository and install the package locally: -```sh + πŸ’‘ Better to run the following CLI-command to load the required models. This is optional, but it will boost the performance and speed of the crawler. You need to do this only once. + + crawl4ai-download-models + +2. Alternatively, you can clone the repository and install the package locally: +```bash virtualenv venv source venv/bin/activate git clone https://github.com/unclecode/crawl4ai.git cd crawl4ai -pip install -e . +pip install -e .[all] ``` -2. Import the necessary modules in your Python script: -```python -from crawl4ai.web_crawler import WebCrawler -from crawl4ai.models import UrlModel -import os - -crawler = WebCrawler(db_path='crawler_data.db') - -# Single page crawl -single_url = UrlModel(url='https://kidocode.com', forced=False) -result = crawl4ai.fetch_page( - single_url, - provider= "openai/gpt-3.5-turbo", - api_token = os.getenv('OPENAI_API_KEY'), - # Set `extract_blocks_flag` to True to enable the LLM to generate semantically clustered chunks - # and return them as JSON. Depending on the model and data size, this may take up to 1 minute. - # Without this setting, it will take between 5 to 20 seconds. - extract_blocks_flag=False - word_count_threshold=5 # Minimum word count for a HTML tag to be considered as a worthy block -) -print(result.model_dump()) - -# Multiple page crawl -urls = [ - UrlModel(url='http://example.com', forced=False), - UrlModel(url='http://example.org', forced=False) -] -results = crawl4ai.fetch_pages( - urls, - provider= "openai/gpt-3.5-turbo", - api_token = os.getenv('OPENAI_API_KEY'), - extract_blocks_flag=True, - word_count_threshold=5 -) - -for res in results: - print(res.model_dump()) -``` - -Running for the first time will download the chrome driver for selenium. Also creates a SQLite database file `crawler_data.db` in the current directory. This file will store the crawled data for future reference. - -The response model is a `CrawlResponse` object that contains the following attributes: -```python -class CrawlResult(BaseModel): - url: str - html: str - success: bool - cleaned_html: str = None - markdown: str = None - parsed_json: str = None - error_message: str = None -``` - -### Running Crawl4AI as a Local Server πŸš€ - -To run Crawl4AI as a standalone local server, follow these steps: - -1. Clone the repository: -```sh -git clone https://github.com/unclecode/crawl4ai.git -``` - -2. Navigate to the project directory: -```sh -cd crawl4ai -``` - -3. Open `crawler/config.py` and set your favorite LLM provider and API token. - -4. Build the Docker image: -```sh +3. Use docker to run the local server: +```bash docker build -t crawl4ai . -``` - For Mac users, use the following command instead: -```sh -docker build --platform linux/amd64 -t crawl4ai . -``` - -5. Run the Docker container: -```sh +# For Mac users +# docker build --platform linux/amd64 -t crawl4ai . docker run -d -p 8000:80 crawl4ai ``` -6. Access the application at `http://localhost:8000`. +For more information about how to run Crawl4AI as a local server, please refer to the [GitHub repository](https://github.com/unclecode/crawl4ai). -- CURL Example: -Set the api_token to your OpenAI API key or any other provider you are using. 
-```sh -curl -X POST -H "Content-Type: application/json" -d '{"urls":["https://techcrunch.com/"],"provider_model":"openai/gpt-3.5-turbo","api_token":"your_api_token","include_raw_html":true,"forced":false,"extract_blocks_flag":false,"word_count_threshold":10}' http://localhost:8000/crawl -``` -Set `extract_blocks_flag` to True to enable the LLM to generate semantically clustered chunks and return them as JSON. Depending on the model and data size, this may take up to 1 minute. Without this setting, it will take between 5 to 20 seconds. +## Using the Local server ot REST API 🌐 -- Python Example: -```python -import requests -import os +You can also use Crawl4AI through the REST API. This method allows you to send HTTP requests to the Crawl4AI server and receive structured data in response. The base URL for the API is `https://crawl4ai.com/crawl`. If you run the local server, you can use `http://localhost:8000/crawl`. (Port is dependent on your docker configuration) -url = "http://localhost:8000/crawl" # Replace with the appropriate server URL -data = { - "urls": [ - "https://example.com" - ], - "provider_model": "groq/llama3-70b-8192", - "api_token": "your_api_token", - "include_raw_html": true, - "forced": false, - # Set `extract_blocks_flag` to True to enable the LLM to generate semantically clustered chunks - # and return them as JSON. Depending on the model and data size, this may take up to 1 minute. - # Without this setting, it will take between 5 to 20 seconds. - "extract_blocks_flag": False, - "word_count_threshold": 5 +### Example Usage + +To use the REST API, send a POST request to `https://crawl4ai.com/crawl` with the following parameters in the request body. + +**Example Request:** +```json +{ + "urls": ["https://www.nbcnews.com/business"], + "include_raw_html": false, + "bypass_cache": true, + "word_count_threshold": 5, + "extraction_strategy": "CosineStrategy", + "chunking_strategy": "RegexChunking", + "css_selector": "p", + "verbose": true, + "extraction_strategy_args": { + "semantic_filter": "finance economy and stock market", + "word_count_threshold": 20, + "max_dist": 0.2, + "linkage_method": "ward", + "top_k": 3 + }, + "chunking_strategy_args": { + "patterns": ["\n\n"] + } } - -response = requests.post(url, json=data) - -if response.status_code == 200: - result = response.json()["results"][0] - print("Parsed JSON:") - print(result["parsed_json"]) - print("\nCleaned HTML:") - print(result["cleaned_html"]) - print("\nMarkdown:") - print(result["markdown"]) -else: - print("Error:", response.status_code, response.text) ``` -This code sends a POST request to the Crawl4AI server running on localhost, specifying the target URL (`https://example.com`) and the desired options (`grq_api_token`, `include_raw_html`, and `forced`). The server processes the request and returns the crawled data in JSON format. +**Example Response:** +```json +{ + "status": "success", + "data": [ + { + "url": "https://www.nbcnews.com/business", + "extracted_content": "...", + "html": "...", + "markdown": "...", + "metadata": {...} + } + ] +} +``` -The response from the server includes the parsed JSON, cleaned HTML, and markdown representations of the crawled webpage. You can access and use this data in your Python application as needed. +For more information about the available parameters and their descriptions, refer to the [Parameters](#parameters) section. 
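+If you prefer to call the endpoint from Python rather than raw HTTP, the snippet below posts the same request body shown above to a locally running server. This is a minimal sketch, assuming the Docker container from the installation section is listening on port 8000 and that the response matches the example response shape (`status` plus a `data` list); adjust the host, port, and strategy arguments to your setup.
+
+```python
+import requests
+
+# Same fields as the example request above, trimmed to the essentials.
+payload = {
+    "urls": ["https://www.nbcnews.com/business"],
+    "include_raw_html": False,
+    "bypass_cache": True,
+    "word_count_threshold": 5,
+    "extraction_strategy": "CosineStrategy",
+    "chunking_strategy": "RegexChunking",
+    "css_selector": "p",
+    "verbose": True,
+    "extraction_strategy_args": {"semantic_filter": "finance economy and stock market"},
+    "chunking_strategy_args": {"patterns": ["\n\n"]},
+}
+
+# Assumes the local Docker container is mapped to port 8000 (see the installation section).
+response = requests.post("http://localhost:8000/crawl", json=payload)
+response.raise_for_status()
+
+# Each item mirrors the example response above: url, extracted_content, html, markdown, metadata.
+for item in response.json().get("data", []):
+    print(item["url"])
+    print(item["extracted_content"][:300])
+```
+
+The request body is identical to the JSON example above, so anything accepted there can be passed through `json=payload` unchanged.
+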
-Make sure to replace `"http://localhost:8000/crawl"` with the appropriate server URL if your Crawl4AI server is running on a different host or port. -Choose the approach that best suits your needs. If you want to integrate Crawl4AI into your existing Python projects, installing it as a library is the way to go. If you prefer to run Crawl4AI as a standalone service and interact with it via API endpoints, running it as a local server using Docker is the recommended approach. +## Python Library Usage πŸš€ -**Make sure to check the config.py tp set required environment variables.** +πŸ”₯ A great way to try out Crawl4AI is to run `quickstart.py` in the `docs/examples` directory. This script demonstrates how to use Crawl4AI to crawl a website and extract content from it. -That's it! You can now integrate Crawl4AI into your Python projects and leverage its web crawling capabilities. πŸŽ‰ +### Quickstart Guide -## πŸ“– Parameters +Create an instance of WebCrawler and call the `warmup()` function. +```python +crawler = WebCrawler() +crawler.warmup() +``` -| Parameter | Description | Required | Default Value | -|----------------------|-------------------------------------------------------------------------------------------------|----------|---------------| -| `urls` | A list of URLs to crawl and extract data from. | Yes | - | -| `provider_model` | The provider and model to use for extracting relevant information (e.g., "groq/llama3-70b-8192"). | Yes | - | -| `api_token` | Your API token for the specified provider. | Yes | - | -| `include_raw_html` | Whether to include the raw HTML content in the response. | No | `false` | -| `forced` | Whether to force a fresh crawl even if the URL has been previously crawled. | No | `false` | -| `extract_blocks_flag`| Whether to extract semantical blocks of text from the HTML. | No | `false` | -| `word_count_threshold` | The minimum number of words a block must contain to be considered meaningful (minimum value is 5). | No | `5` | +### Understanding 'bypass_cache' and 'include_raw_html' parameters -## πŸ› οΈ Configuration -Crawl4AI allows you to configure various parameters and settings in the `crawler/config.py` file. Here's an example of how you can adjust the parameters: +First crawl (caches the result): +```python +result = crawler.run(url="https://www.nbcnews.com/business") +``` + +Second crawl (Force to crawl again): +```python +result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True) +``` + πŸ’‘ Don't forget to set `bypass_cache` to True if you want to try different strategies for the same URL. Otherwise, the cached result will be returned. You can also set `always_by_pass_cache` in constructor to True to always bypass the cache. + +Crawl result without raw HTML content: +```python +result = crawler.run(url="https://www.nbcnews.com/business", include_raw_html=False) +``` + +### Adding a chunking strategy: RegexChunking + +Using RegexChunking: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + chunking_strategy=RegexChunking(patterns=["\n\n"]) +) +``` + +Using NlpSentenceChunking: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + chunking_strategy=NlpSentenceChunking() +) +``` + +### Extraction strategy: CosineStrategy + +So far, the extracted content is just the result of chunking. To extract meaningful content, you can use extraction strategies. These strategies cluster consecutive chunks into meaningful blocks, keeping the same order as the text in the HTML. 
This approach is perfect for use in RAG applications and semantical search queries. + +Using CosineStrategy: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=CosineStrategy( + semantic_filter="", + word_count_threshold=10, + max_dist=0.2, + linkage_method="ward", + top_k=3 + ) +) +``` + +You can set `semantic_filter` to filter relevant documents before clustering. Documents are filtered based on their cosine similarity to the keyword filter embedding. ```python -import os -from dotenv import load_dotenv - -load_dotenv() # Load environment variables from .env file - -# Default provider -DEFAULT_PROVIDER = "openai/gpt-4-turbo" - -# Provider-model dictionary -PROVIDER_MODELS = { - "groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"), - "groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"), - "openai/gpt-3.5-turbo": os.getenv("OPENAI_API_KEY"), - "openai/gpt-4-turbo": os.getenv("OPENAI_API_KEY"), - "anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"), - "anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"), - "anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"), -} - -# Chunk token threshold -CHUNK_TOKEN_THRESHOLD = 1000 - -# Threshold for the minimum number of words in an HTML tag to be considered -MIN_WORD_THRESHOLD = 5 +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=CosineStrategy( + semantic_filter="finance economy and stock market", + word_count_threshold=10, + max_dist=0.2, + linkage_method="ward", + top_k=3 + ) +) ``` -In the `crawler/config.py` file, you can: -- Set the default provider using the `DEFAULT_PROVIDER` variable. -- Add or modify the provider-model dictionary (`PROVIDER_MODELS`) to include your desired providers and their corresponding API keys. Crawl4AI supports various providers such as Groq, OpenAI, Anthropic, and more. You can add any provider supported by LiteLLM, as well as Ollama. -- Adjust the `CHUNK_TOKEN_THRESHOLD` value to control the splitting of web content into chunks for parallel processing. A higher value means fewer chunks and faster processing, but it may cause issues with weaker LLMs during extraction. -- Modify the `MIN_WORD_THRESHOLD` value to set the minimum number of words an HTML tag must contain to be considered a meaningful block. +### Using LLMExtractionStrategy -Make sure to set the appropriate API keys for each provider in the `PROVIDER_MODELS` dictionary. You can either directly provide the API key or use environment variables to store them securely. +Without instructions: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy( + provider="openai/gpt-4o", + api_token=os.getenv('OPENAI_API_KEY') + ) +) +``` -Remember to update the `crawler/config.py` file based on your specific requirements and the providers you want to use with Crawl4AI. 
+With instructions: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy( + provider="openai/gpt-4o", + api_token=os.getenv('OPENAI_API_KEY'), + instruction="I am interested in only financial news" + ) +) +``` + +### Targeted extraction using CSS selector + +Extract only H2 tags: +```python +result = crawler.run( + url="https://www.nbcnews.com/business", + css_selector="h2" +) +``` + +### Passing JavaScript code to click 'Load More' button + +Using JavaScript to click 'Load More' button: +```python +js_code = """ +const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); +loadMoreButton && loadMoreButton.click(); +""" +crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code) +crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True) +result = crawler.run(url="https://www.nbcnews.com/business") +``` + +## Parameters πŸ“– + +| Parameter | Description | Required | Default Value | +|-----------------------|-------------------------------------------------------------------------------------------------------|----------|---------------------| +| `urls` | A list of URLs to crawl and extract data from. | Yes | - | +| `include_raw_html` | Whether to include the raw HTML content in the response. | No | `false` | +| `bypass_cache` | Whether to force a fresh crawl even if the URL has been previously crawled. | No | `false` | +| `word_count_threshold`| The minimum number of words a block must contain to be considered meaningful (minimum value is 5). | No | `5` | +| `extraction_strategy` | The strategy to use for extracting content from the HTML (e.g., "CosineStrategy"). | No | `NoExtractionStrategy` | +| `chunking_strategy` | The strategy to use for chunking the text before processing (e.g., "RegexChunking"). | No | `RegexChunking` | +| `css_selector` | The CSS selector to target specific parts of the HTML for extraction. | No | `None` | +| `verbose` | Whether to enable verbose logging. | No | `true` | + +## Chunking Strategies πŸ“š + +### RegexChunking + +`RegexChunking` is a text chunking strategy that splits a given text into smaller parts using regular expressions. This is useful for preparing large texts for processing by language models, ensuring they are divided into manageable segments. + +**Constructor Parameters:** +- `patterns` (list, optional): A list of regular expression patterns used to split the text. Default is to split by double newlines (`['\n\n']`). + +**Example usage:** +```python +chunker = RegexChunking(patterns=[r'\n\n', r'\. ']) +chunks = chunker.chunk("This is a sample text. It will be split into chunks.") +``` + +### NlpSentenceChunking + +`NlpSentenceChunking` uses a natural language processing model to chunk a given text into sentences. This approach leverages SpaCy to accurately split text based on sentence boundaries. + +**Constructor Parameters:** +- None. + +**Example usage:** +```python +chunker = NlpSentenceChunking() +chunks = chunker.chunk("This is a sample text. It will be split into sentences.") +``` + +### TopicSegmentationChunking + +`TopicSegmentationChunking` uses the TextTiling algorithm to segment a given text into topic-based chunks. This method identifies thematic boundaries in the text. + +**Constructor Parameters:** +- `num_keywords` (int, optional): The number of keywords to extract for each topic segment. Default is `3`. 
+ +**Example usage:** +```python +chunker = TopicSegmentationChunking(num_keywords=3) +chunks = chunker.chunk("This is a sample text. It will be split into topic-based segments.") +``` + +### FixedLengthWordChunking + +`FixedLengthWordChunking` splits a given text into chunks of fixed length, based on the number of words. + +**Constructor Parameters:** +- `chunk_size` (int, optional): The number of words in each chunk. Default is `100`. + +**Example usage:** +```python +chunker = FixedLengthWordChunking(chunk_size=100) +chunks = chunker.chunk("This is a sample text. It will be split into fixed-length word chunks.") +``` + +### SlidingWindowChunking + +`SlidingWindowChunking` uses a sliding window approach to chunk a given text. Each chunk has a fixed length, and the window slides by a specified step size. + +**Constructor Parameters:** +- `window_size` (int, optional): The number of words in each chunk. Default is `100`. +- `step` (int, optional): The number of words to slide the window. Default is `50`. + +**Example usage:** +```python +chunker = SlidingWindowChunking(window_size=100, step=50) +chunks = chunker.chunk("This is a sample text. It will be split using a sliding window approach.") +``` + +## Extraction Strategies 🧠 + +### NoExtractionStrategy + +`NoExtractionStrategy` is a basic extraction strategy that returns the entire HTML content without any modification. It is useful for cases where no specific extraction is required. + +**Constructor Parameters:** +None. + +**Example usage:** +```python +extractor = NoExtractionStrategy() +extracted_content = extractor.extract(url, html) +``` + +### LLMExtractionStrategy + +`LLMExtractionStrategy` uses a Language Model (LLM) to extract meaningful blocks or chunks from the given HTML content. This strategy leverages an external provider for language model completions. + +**Constructor Parameters:** +- `provider` (str, optional): The provider to use for the language model completions. Default is `DEFAULT_PROVIDER` (e.g., openai/gpt-4). +- `api_token` (str, optional): The API token for the provider. If not provided, it will try to load from the environment variable `OPENAI_API_KEY`. +- `instruction` (str, optional): An instruction to guide the LLM on how to perform the extraction. This allows users to specify the type of data they are interested in or set the tone of the response. Default is `None`. + +**Example usage:** +```python +extractor = LLMExtractionStrategy(provider='openai', api_token='your_api_token', instruction='Extract only news about AI.') +extracted_content = extractor.extract(url, html) +``` + +### CosineStrategy + +`CosineStrategy` uses hierarchical clustering based on cosine similarity to extract clusters of text from the given HTML content. This strategy is suitable for identifying related content sections. + +**Constructor Parameters:** +- `semantic_filter` (str, optional): A string containing keywords for filtering relevant documents before clustering. If provided, documents are filtered based on their cosine similarity to the keyword filter embedding. Default is `None`. +- `word_count_threshold` (int, optional): Minimum number of words per cluster. Default is `20`. +- `max_dist` (float, optional): The maximum cophenetic distance on the dendrogram to form clusters. Default is `0.2`. +- `linkage_method` (str, optional): The linkage method for hierarchical clustering. Default is `'ward'`. +- `top_k` (int, optional): Number of top categories to extract. Default is `3`. 
+- `model_name` (str, optional): The model name for embedding generation. Default is `'BAAI/bge-small-en-v1.5'`. + +**Example usage:** +```python +extractor = CosineStrategy(semantic_filter='finance rental prices', word_count_threshold=10, max_dist=0.2, linkage_method='ward', top_k=3, model_name='BAAI/bge-small-en-v1.5') +extracted_content = extractor.extract(url, html) +``` + +### TopicExtractionStrategy + +`TopicExtractionStrategy` uses the TextTiling algorithm to segment the HTML content into topics and extracts keywords for each segment. This strategy is useful for identifying and summarizing thematic content. + +**Constructor Parameters:** +- `num_keywords` (int, optional): Number of keywords to represent each topic segment. Default is `3`. + +**Example usage:** +```python +extractor = TopicExtractionStrategy(num_keywords=3) +extracted_content = extractor.extract(url, html) +``` ## Contributing 🀝 @@ -273,5 +519,6 @@ If you have any questions, suggestions, or feedback, please feel free to reach o - GitHub: [unclecode](https://github.com/unclecode) - Twitter: [@unclecode](https://twitter.com/unclecode) +- Website: [crawl4ai.com](https://crawl4ai.com) Let's work together to make the web more accessible and useful for AI applications! πŸ’ͺπŸŒπŸ€– diff --git a/crawl4ai/chunking_strategy.py b/crawl4ai/chunking_strategy.py new file mode 100644 index 00000000..6ece75e3 --- /dev/null +++ b/crawl4ai/chunking_strategy.py @@ -0,0 +1,105 @@ +from abc import ABC, abstractmethod +import re +from collections import Counter +import string +from .model_loader import load_nltk_punkt + +# Define the abstract base class for chunking strategies +class ChunkingStrategy(ABC): + + @abstractmethod + def chunk(self, text: str) -> list: + """ + Abstract method to chunk the given text. + """ + pass + +# Regex-based chunking +class RegexChunking(ChunkingStrategy): + def __init__(self, patterns=None): + if patterns is None: + patterns = [r'\n\n'] # Default split pattern + self.patterns = patterns + + def chunk(self, text: str) -> list: + paragraphs = [text] + for pattern in self.patterns: + new_paragraphs = [] + for paragraph in paragraphs: + new_paragraphs.extend(re.split(pattern, paragraph)) + paragraphs = new_paragraphs + return paragraphs + +# NLP-based sentence chunking +class NlpSentenceChunking(ChunkingStrategy): + def __init__(self): + load_nltk_punkt() + pass + + def chunk(self, text: str) -> list: + # Improved regex for sentence splitting + # sentence_endings = re.compile( + # r'(? 
list: + # Use the TextTilingTokenizer to segment the text + segmented_topics = self.tokenizer.tokenize(text) + return segmented_topics + + def extract_keywords(self, text: str) -> list: + # Tokenize and remove stopwords and punctuation + import nltk as nl + tokens = nl.toknize.word_tokenize(text) + tokens = [token.lower() for token in tokens if token not in nl.corpus.stopwords.words('english') and token not in string.punctuation] + + # Calculate frequency distribution + freq_dist = Counter(tokens) + keywords = [word for word, freq in freq_dist.most_common(self.num_keywords)] + return keywords + + def chunk_with_topics(self, text: str) -> list: + # Segment the text into topics + segments = self.chunk(text) + # Extract keywords for each topic segment + segments_with_topics = [(segment, self.extract_keywords(segment)) for segment in segments] + return segments_with_topics + +# Fixed-length word chunks +class FixedLengthWordChunking(ChunkingStrategy): + def __init__(self, chunk_size=100): + self.chunk_size = chunk_size + + def chunk(self, text: str) -> list: + words = text.split() + return [' '.join(words[i:i + self.chunk_size]) for i in range(0, len(words), self.chunk_size)] + +# Sliding window chunking +class SlidingWindowChunking(ChunkingStrategy): + def __init__(self, window_size=100, step=50): + self.window_size = window_size + self.step = step + + def chunk(self, text: str) -> list: + words = text.split() + chunks = [] + for i in range(0, len(words), self.step): + chunks.append(' '.join(words[i:i + self.window_size])) + return chunks + + diff --git a/crawl4ai/config.py b/crawl4ai/config.py index b29325f1..a20eb547 100644 --- a/crawl4ai/config.py +++ b/crawl4ai/config.py @@ -3,15 +3,17 @@ from dotenv import load_dotenv load_dotenv() # Load environment variables from .env file -# Default provider +# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy DEFAULT_PROVIDER = "openai/gpt-4-turbo" - -# Provider-model dictionary +MODEL_REPO_BRANCH = "new-release-0.0.2" +# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy PROVIDER_MODELS = { + "ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token "groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"), "groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"), "openai/gpt-3.5-turbo": os.getenv("OPENAI_API_KEY"), "openai/gpt-4-turbo": os.getenv("OPENAI_API_KEY"), + "openai/gpt-4o": os.getenv("OPENAI_API_KEY"), "anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"), "anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"), "anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"), diff --git a/crawl4ai/crawler_strategy.py b/crawl4ai/crawler_strategy.py new file mode 100644 index 00000000..24add103 --- /dev/null +++ b/crawl4ai/crawler_strategy.py @@ -0,0 +1,92 @@ +from abc import ABC, abstractmethod +from selenium import webdriver +from selenium.webdriver.chrome.service import Service +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.chrome.options import Options +from selenium.common.exceptions import InvalidArgumentException + +from typing import List +import requests +import os +from pathlib import Path + +class CrawlerStrategy(ABC): + @abstractmethod + def crawl(self, url: str, **kwargs) -> str: + pass + +class CloudCrawlerStrategy(CrawlerStrategy): + def __init__(self, use_cached_html = 
False): + super().__init__() + self.use_cached_html = use_cached_html + + def crawl(self, url: str) -> str: + data = { + "urls": [url], + "include_raw_html": True, + "forced": True, + "extract_blocks": False, + } + + response = requests.post("http://crawl4ai.uccode.io/crawl", json=data) + response = response.json() + html = response["results"][0]["html"] + return html + +class LocalSeleniumCrawlerStrategy(CrawlerStrategy): + def __init__(self, use_cached_html=False, js_code=None): + super().__init__() + print("[LOG] πŸš€ Initializing LocalSeleniumCrawlerStrategy") + self.options = Options() + self.options.headless = True + self.options.add_argument("--no-sandbox") + self.options.add_argument("--disable-dev-shm-usage") + self.options.add_argument("--disable-gpu") + self.options.add_argument("--disable-extensions") + self.options.add_argument("--headless") + self.use_cached_html = use_cached_html + self.js_code = js_code + + # chromedriver_autoinstaller.install() + import chromedriver_autoinstaller + self.service = Service(chromedriver_autoinstaller.install()) + self.driver = webdriver.Chrome(service=self.service, options=self.options) + + def crawl(self, url: str) -> str: + if self.use_cached_html: + cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url.replace("/", "_")) + if os.path.exists(cache_file_path): + with open(cache_file_path, "r") as f: + return f.read() + + try: + self.driver.get(url) + WebDriverWait(self.driver, 10).until( + EC.presence_of_all_elements_located((By.TAG_NAME, "html")) + ) + + # Execute JS code if provided + if self.js_code: + self.driver.execute_script(self.js_code) + # Optionally, wait for some condition after executing the JS code + WebDriverWait(self.driver, 10).until( + lambda driver: driver.execute_script("return document.readyState") == "complete" + ) + + html = self.driver.page_source + + # Store in cache + cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url.replace("/", "_")) + with open(cache_file_path, "w") as f: + f.write(html) + + return html + except InvalidArgumentException: + raise InvalidArgumentException(f"Invalid URL {url}") + except Exception as e: + raise Exception(f"Failed to crawl {url}: {str(e)}") + + def quit(self): + self.driver.quit() \ No newline at end of file diff --git a/crawl4ai/database.py b/crawl4ai/database.py index 89048d05..391d3f4f 100644 --- a/crawl4ai/database.py +++ b/crawl4ai/database.py @@ -1,8 +1,16 @@ +import os +from pathlib import Path import sqlite3 from typing import Optional +from typing import Optional, Tuple -def init_db(db_path: str): - conn = sqlite3.connect(db_path) +DB_PATH = os.path.join(Path.home(), ".crawl4ai") +os.makedirs(DB_PATH, exist_ok=True) +DB_PATH = os.path.join(DB_PATH, "crawl4ai.db") + +def init_db(): + global DB_PATH + conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute(''' CREATE TABLE IF NOT EXISTS crawled_data ( @@ -10,52 +18,81 @@ def init_db(db_path: str): html TEXT, cleaned_html TEXT, markdown TEXT, - parsed_json TEXT, + extracted_content TEXT, success BOOLEAN ) ''') conn.commit() conn.close() -def get_cached_url(db_path: str, url: str) -> Optional[tuple]: - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute('SELECT url, html, cleaned_html, markdown, parsed_json, success FROM crawled_data WHERE url = ?', (url,)) - result = cursor.fetchone() - conn.close() - return result +def check_db_path(): + if not DB_PATH: + raise ValueError("Database path is not set or is empty.") -def cache_url(db_path: str, url: str, html: 
str, cleaned_html: str, markdown: str, parsed_json: str, success: bool): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute(''' - INSERT INTO crawled_data (url, html, cleaned_html, markdown, parsed_json, success) - VALUES (?, ?, ?, ?, ?, ?) - ON CONFLICT(url) DO UPDATE SET - html = excluded.html, - cleaned_html = excluded.cleaned_html, - markdown = excluded.markdown, - parsed_json = excluded.parsed_json, - success = excluded.success - ''', (str(url), html, cleaned_html, markdown, parsed_json, success)) - conn.commit() - conn.close() - -def get_total_count(db_path: str) -> int: +def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, bool]]: + check_db_path() try: - conn = sqlite3.connect(db_path) + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success FROM crawled_data WHERE url = ?', (url,)) + result = cursor.fetchone() + conn.close() + return result + except Exception as e: + print(f"Error retrieving cached URL: {e}") + return None + +def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool): + check_db_path() + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute(''' + INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success) + VALUES (?, ?, ?, ?, ?, ?) + ON CONFLICT(url) DO UPDATE SET + html = excluded.html, + cleaned_html = excluded.cleaned_html, + markdown = excluded.markdown, + extracted_content = excluded.extracted_content, + success = excluded.success + ''', (url, html, cleaned_html, markdown, extracted_content, success)) + conn.commit() + conn.close() + except Exception as e: + print(f"Error caching URL: {e}") + +def get_total_count() -> int: + check_db_path() + try: + conn = sqlite3.connect(DB_PATH) cursor = conn.cursor() cursor.execute('SELECT COUNT(*) FROM crawled_data') result = cursor.fetchone() conn.close() return result[0] except Exception as e: + print(f"Error getting total count: {e}") return 0 - -# Crete function to cler the database -def clear_db(db_path: str): - conn = sqlite3.connect(db_path) - cursor = conn.cursor() - cursor.execute('DELETE FROM crawled_data') - conn.commit() - conn.close() \ No newline at end of file + +def clear_db(): + check_db_path() + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute('DELETE FROM crawled_data') + conn.commit() + conn.close() + except Exception as e: + print(f"Error clearing database: {e}") + +def flush_db(): + check_db_path() + try: + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + cursor.execute('DROP TABLE crawled_data') + conn.commit() + conn.close() + except Exception as e: + print(f"Error flushing database: {e}") \ No newline at end of file diff --git a/crawl4ai/extraction_strategy.py b/crawl4ai/extraction_strategy.py new file mode 100644 index 00000000..8567ea6b --- /dev/null +++ b/crawl4ai/extraction_strategy.py @@ -0,0 +1,466 @@ +from abc import ABC, abstractmethod +from typing import Any, List, Dict, Optional, Union +from concurrent.futures import ThreadPoolExecutor, as_completed +import json, time +# from optimum.intel import IPEXModel +from .prompts import PROMPT_EXTRACT_BLOCKS, PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION +from .config import * +from .utils import * +from functools import partial +from .model_loader import * + + +import numpy as np +class ExtractionStrategy(ABC): + """ + Abstract base class for all extraction strategies. 
+ """ + + def __init__(self, **kwargs): + self.DEL = "<|DEL|>" + self.name = self.__class__.__name__ + self.verbose = kwargs.get("verbose", False) + + @abstractmethod + def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]: + """ + Extract meaningful blocks or chunks from the given HTML. + + :param url: The URL of the webpage. + :param html: The HTML content of the webpage. + :return: A list of extracted blocks or chunks. + """ + pass + + def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]: + """ + Process sections of text in parallel by default. + + :param url: The URL of the webpage. + :param sections: List of sections (strings) to process. + :return: A list of processed JSON blocks. + """ + extracted_content = [] + with ThreadPoolExecutor() as executor: + futures = [executor.submit(self.extract, url, section, **kwargs) for section in sections] + for future in as_completed(futures): + extracted_content.extend(future.result()) + return extracted_content +class NoExtractionStrategy(ExtractionStrategy): + def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]: + return [{"index": 0, "content": html}] + + def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]: + return [{"index": i, "tags": [], "content": section} for i, section in enumerate(sections)] + +class LLMExtractionStrategy(ExtractionStrategy): + def __init__(self, provider: str = DEFAULT_PROVIDER, api_token: Optional[str] = None, instruction:str = None, **kwargs): + """ + Initialize the strategy with clustering parameters. + + :param provider: The provider to use for extraction. + :param api_token: The API token for the provider. + :param instruction: The instruction to use for the LLM model. + """ + super().__init__() + self.provider = provider + self.api_token = api_token or PROVIDER_MODELS.get(provider, None) or os.getenv("OPENAI_API_KEY") + self.instruction = instruction + self.verbose = kwargs.get("verbose", False) + + if not self.api_token: + raise ValueError("API token must be provided for LLMExtractionStrategy. 
Update the config.py or set OPENAI_API_KEY environment variable.") + + + def extract(self, url: str, ix:int, html: str) -> List[Dict[str, Any]]: + # print("[LOG] Extracting blocks from URL:", url) + print(f"[LOG] Call LLM for {url} - block index: {ix}") + variable_values = { + "URL": url, + "HTML": escape_json_string(sanitize_html(html)), + } + + if self.instruction: + variable_values["REQUEST"] = self.instruction + + prompt_with_variables = PROMPT_EXTRACT_BLOCKS if not self.instruction else PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION + for variable in variable_values: + prompt_with_variables = prompt_with_variables.replace( + "{" + variable + "}", variable_values[variable] + ) + + response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token) + try: + blocks = extract_xml_data(["blocks"], response.choices[0].message.content)['blocks'] + blocks = json.loads(blocks) + for block in blocks: + block['error'] = False + except Exception as e: + print("Error extracting blocks:", str(e)) + parsed, unparsed = split_and_parse_json_objects(response.choices[0].message.content) + blocks = parsed + if unparsed: + blocks.append({ + "index": 0, + "error": True, + "tags": ["error"], + "content": unparsed + }) + + if self.verbose: + print("[LOG] Extracted", len(blocks), "blocks from URL:", url, "block index:", ix) + return blocks + + def _merge(self, documents): + chunks = [] + sections = [] + total_token_so_far = 0 + + for document in documents: + if total_token_so_far < CHUNK_TOKEN_THRESHOLD: + chunk = document.split(' ') + total_token_so_far += len(chunk) * 1.3 + chunks.append(document) + else: + sections.append('\n\n'.join(chunks)) + chunks = [document] + total_token_so_far = len(document.split(' ')) * 1.3 + + if chunks: + sections.append('\n\n'.join(chunks)) + + return sections + + def run(self, url: str, sections: List[str]) -> List[Dict[str, Any]]: + """ + Process sections sequentially with a delay for rate limiting issues, specifically for LLMExtractionStrategy. + """ + + merged_sections = self._merge(sections) + extracted_content = [] + if self.provider.startswith("groq/"): + # Sequential processing with a delay + for ix, section in enumerate(merged_sections): + extracted_content.extend(self.extract(ix, url, section)) + time.sleep(0.5) # 500 ms delay between each processing + else: + # Parallel processing using ThreadPoolExecutor + with ThreadPoolExecutor(max_workers=4) as executor: + extract_func = partial(self.extract, url) + futures = [executor.submit(extract_func, ix, section) for ix, section in enumerate(merged_sections)] + + for future in as_completed(futures): + extracted_content.extend(future.result()) + + + return extracted_content + +class CosineStrategy(ExtractionStrategy): + def __init__(self, semantic_filter = None, word_count_threshold=10, max_dist=0.2, linkage_method='ward', top_k=3, model_name = 'BAAI/bge-small-en-v1.5', **kwargs): + """ + Initialize the strategy with clustering parameters. + + :param semantic_filter: A keyword filter for document filtering. + :param word_count_threshold: Minimum number of words per cluster. + :param max_dist: The maximum cophenetic distance on the dendrogram to form clusters. + :param linkage_method: The linkage method for hierarchical clustering. + :param top_k: Number of top categories to extract. 
+ """ + super().__init__() + + self.semantic_filter = semantic_filter + self.word_count_threshold = word_count_threshold + self.max_dist = max_dist + self.linkage_method = linkage_method + self.top_k = top_k + self.timer = time.time() + self.verbose = kwargs.get("verbose", False) + + self.buffer_embeddings = np.array([]) + + if model_name == "bert-base-uncased": + self.tokenizer, self.model = load_bert_base_uncased() + elif model_name == "BAAI/bge-small-en-v1.5": + self.tokenizer, self.model = load_bge_small_en_v1_5() + + self.nlp = load_text_multilabel_classifier() + + if self.verbose: + print(f"[LOG] Model loaded {model_name}, models/reuters, took " + str(time.time() - self.timer) + " seconds") + + def filter_documents_embeddings(self, documents: List[str], semantic_filter: str, threshold: float = 0.5) -> List[str]: + """ + Filter documents based on the cosine similarity of their embeddings with the semantic_filter embedding. + + :param documents: List of text chunks (documents). + :param semantic_filter: A string containing the keywords for filtering. + :param threshold: Cosine similarity threshold for filtering documents. + :return: Filtered list of documents. + """ + from sklearn.metrics.pairwise import cosine_similarity + if not semantic_filter: + return documents + # Compute embedding for the keyword filter + query_embedding = self.get_embeddings([semantic_filter])[0] + + # Compute embeddings for the docu ments + document_embeddings = self.get_embeddings(documents) + + # Calculate cosine similarity between the query embedding and document embeddings + similarities = cosine_similarity([query_embedding], document_embeddings).flatten() + + # Filter documents based on the similarity threshold + filtered_docs = [doc for doc, sim in zip(documents, similarities) if sim >= threshold] + + return filtered_docs + + def get_embeddings(self, sentences: List[str], bypass_buffer=True): + """ + Get BERT embeddings for a list of sentences. + + :param sentences: List of text chunks (sentences). + :return: NumPy array of embeddings. + """ + # if self.buffer_embeddings.any() and not bypass_buffer: + # return self.buffer_embeddings + + import torch + # Tokenize sentences and convert to tensor + encoded_input = self.tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') + # Compute token embeddings + with torch.no_grad(): + model_output = self.model(**encoded_input) + + # Get embeddings from the last hidden state (mean pooling) + embeddings = model_output.last_hidden_state.mean(1) + self.buffer_embeddings = embeddings.numpy() + return embeddings.numpy() + + def hierarchical_clustering(self, sentences: List[str]): + """ + Perform hierarchical clustering on sentences and return cluster labels. + + :param sentences: List of text chunks (sentences). + :return: NumPy array of cluster labels. 
+ """ + # Get embeddings + from scipy.cluster.hierarchy import linkage, fcluster + from scipy.spatial.distance import pdist + self.timer = time.time() + embeddings = self.get_embeddings(sentences, bypass_buffer=False) + # print(f"[LOG] πŸš€ Embeddings computed in {time.time() - self.timer:.2f} seconds") + # Compute pairwise cosine distances + distance_matrix = pdist(embeddings, 'cosine') + # Perform agglomerative clustering respecting order + linked = linkage(distance_matrix, method=self.linkage_method) + # Form flat clusters + labels = fcluster(linked, self.max_dist, criterion='distance') + return labels + + def filter_clusters_by_word_count(self, clusters: Dict[int, List[str]]): + """ + Filter clusters to remove those with a word count below the threshold. + + :param clusters: Dictionary of clusters. + :return: Filtered dictionary of clusters. + """ + filtered_clusters = {} + for cluster_id, texts in clusters.items(): + # Concatenate texts for analysis + full_text = " ".join(texts) + # Count words + word_count = len(full_text.split()) + + # Keep clusters with word count above the threshold + if word_count >= self.word_count_threshold: + filtered_clusters[cluster_id] = texts + + return filtered_clusters + + def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]: + """ + Extract clusters from HTML content using hierarchical clustering. + + :param url: The URL of the webpage. + :param html: The HTML content of the webpage. + :return: A list of dictionaries representing the clusters. + """ + # Assume `html` is a list of text chunks for this strategy + t = time.time() + text_chunks = html.split(self.DEL) # Split by lines or paragraphs as needed + + # Pre-filter documents using embeddings and semantic_filter + text_chunks = self.filter_documents_embeddings(text_chunks, self.semantic_filter) + + if not text_chunks: + return [] + + # Perform clustering + labels = self.hierarchical_clustering(text_chunks) + # print(f"[LOG] πŸš€ Clustering done in {time.time() - t:.2f} seconds") + + # Organize texts by their cluster labels, retaining order + t = time.time() + clusters = {} + for index, label in enumerate(labels): + clusters.setdefault(label, []).append(text_chunks[index]) + + # Filter clusters by word count + filtered_clusters = self.filter_clusters_by_word_count(clusters) + + # Convert filtered clusters to a sorted list of dictionaries + cluster_list = [{"index": int(idx), "tags" : [], "content": " ".join(filtered_clusters[idx])} for idx in sorted(filtered_clusters)] + + labels = self.nlp([cluster['content'] for cluster in cluster_list]) + + for cluster, label in zip(cluster_list, labels): + cluster['tags'] = label + + # Process the text with the loaded model + # for cluster in cluster_list: + # cluster['tags'] = self.nlp(cluster['content'])[0]['label'] + # doc = self.nlp(cluster['content']) + # tok_k = self.top_k + # top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:tok_k] + # cluster['tags'] = [cat for cat, _ in top_categories] + + # print(f"[LOG] πŸš€ Categorization done in {time.time() - t:.2f} seconds") + + return cluster_list + + def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]: + """ + Process sections using hierarchical clustering. + + :param url: The URL of the webpage. + :param sections: List of sections (strings) to process. + :param provider: The provider to be used for extraction (not used here). + :param api_token: Optional API token for the provider (not used here). 
+ :return: A list of processed JSON blocks. + """ + # This strategy processes all sections together + + return self.extract(url, self.DEL.join(sections), **kwargs) + +class TopicExtractionStrategy(ExtractionStrategy): + def __init__(self, num_keywords: int = 3, **kwargs): + """ + Initialize the topic extraction strategy with parameters for topic segmentation. + + :param num_keywords: Number of keywords to represent each topic segment. + """ + import nltk + super().__init__() + self.num_keywords = num_keywords + self.tokenizer = nltk.TextTilingTokenizer() + + def extract_keywords(self, text: str) -> List[str]: + """ + Extract keywords from a given text segment using simple frequency analysis. + + :param text: The text segment from which to extract keywords. + :return: A list of keyword strings. + """ + import nltk + # Tokenize the text and compute word frequency + words = nltk.word_tokenize(text) + freq_dist = nltk.FreqDist(words) + # Get the most common words as keywords + keywords = [word for (word, _) in freq_dist.most_common(self.num_keywords)] + return keywords + + def extract(self, url: str, html: str, *q, **kwargs) -> List[Dict[str, Any]]: + """ + Extract topics from HTML content using TextTiling for segmentation and keyword extraction. + + :param url: The URL of the webpage. + :param html: The HTML content of the webpage. + :param provider: The provider to be used for extraction (not used here). + :param api_token: Optional API token for the provider (not used here). + :return: A list of dictionaries representing the topics. + """ + # Use TextTiling to segment the text into topics + segmented_topics = html.split(self.DEL) # Split by lines or paragraphs as needed + + # Prepare the output as a list of dictionaries + topic_list = [] + for i, segment in enumerate(segmented_topics): + # Extract keywords for each segment + keywords = self.extract_keywords(segment) + topic_list.append({ + "index": i, + "content": segment, + "keywords": keywords + }) + + return topic_list + + def run(self, url: str, sections: List[str], *q, **kwargs) -> List[Dict[str, Any]]: + """ + Process sections using topic segmentation and keyword extraction. + + :param url: The URL of the webpage. + :param sections: List of sections (strings) to process. + :param provider: The provider to be used for extraction (not used here). + :param api_token: Optional API token for the provider (not used here). + :return: A list of processed JSON blocks. + """ + # Concatenate sections into a single text for coherent topic segmentation + + + return self.extract(url, self.DEL.join(sections), **kwargs) + +class ContentSummarizationStrategy(ExtractionStrategy): + def __init__(self, model_name: str = "sshleifer/distilbart-cnn-12-6", **kwargs): + """ + Initialize the content summarization strategy with a specific model. + + :param model_name: The model to use for summarization. + """ + from transformers import pipeline + self.summarizer = pipeline("summarization", model=model_name) + + def extract(self, url: str, text: str, provider: str = None, api_token: Optional[str] = None) -> List[Dict[str, Any]]: + """ + Summarize a single section of text. + + :param url: The URL of the webpage. + :param text: A section of text to summarize. + :param provider: The provider to be used for extraction (not used here). + :param api_token: Optional API token for the provider (not used here). + :return: A dictionary with the summary. 
+ """ + try: + summary = self.summarizer(text, max_length=130, min_length=30, do_sample=False) + return {"summary": summary[0]['summary_text']} + except Exception as e: + print(f"Error summarizing text: {e}") + return {"summary": text} # Fallback to original text if summarization fails + + def run(self, url: str, sections: List[str], provider: str = None, api_token: Optional[str] = None) -> List[Dict[str, Any]]: + """ + Process each section in parallel to produce summaries. + + :param url: The URL of the webpage. + :param sections: List of sections (strings) to summarize. + :param provider: The provider to be used for extraction (not used here). + :param api_token: Optional API token for the provider (not used here). + :return: A list of dictionaries with summaries for each section. + """ + # Use a ThreadPoolExecutor to summarize in parallel + summaries = [] + with ThreadPoolExecutor() as executor: + # Create a future for each section's summarization + future_to_section = {executor.submit(self.extract, url, section, provider, api_token): i for i, section in enumerate(sections)} + for future in as_completed(future_to_section): + section_index = future_to_section[future] + try: + summary_result = future.result() + summaries.append((section_index, summary_result)) + except Exception as e: + print(f"Error processing section {section_index}: {e}") + summaries.append((section_index, {"summary": sections[section_index]})) # Fallback to original text + + # Sort summaries by the original section index to maintain order + summaries.sort(key=lambda x: x[0]) + return [summary for _, summary in summaries] \ No newline at end of file diff --git a/crawl4ai/model_loader.py b/crawl4ai/model_loader.py new file mode 100644 index 00000000..3a2b8695 --- /dev/null +++ b/crawl4ai/model_loader.py @@ -0,0 +1,127 @@ +from functools import lru_cache +from pathlib import Path +import subprocess, os +import shutil +from crawl4ai.config import MODEL_REPO_BRANCH +import argparse + +def get_home_folder(): + home_folder = os.path.join(Path.home(), ".crawl4ai") + os.makedirs(home_folder, exist_ok=True) + os.makedirs(f"{home_folder}/cache", exist_ok=True) + os.makedirs(f"{home_folder}/models", exist_ok=True) + return home_folder + +@lru_cache() +def load_bert_base_uncased(): + from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None) + model = BertModel.from_pretrained('bert-base-uncased', resume_download=None) + return tokenizer, model + +@lru_cache() +def load_bge_small_en_v1_5(): + from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel + tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-small-en-v1.5', resume_download=None) + model = AutoModel.from_pretrained('BAAI/bge-small-en-v1.5', resume_download=None) + model.eval() + return tokenizer, model + +@lru_cache() +def load_text_classifier(): + from transformers import AutoTokenizer, AutoModelForSequenceClassification + from transformers import pipeline + + tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news") + model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news") + pipe = pipeline("text-classification", model=model, tokenizer=tokenizer) + + return pipe + +@lru_cache() +def load_text_multilabel_classifier(): + from transformers import AutoModelForSequenceClassification, AutoTokenizer + import numpy as np + from scipy.special import expit + import 
torch + + MODEL = "cardiffnlp/tweet-topic-21-multi" + tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None) + model = AutoModelForSequenceClassification.from_pretrained(MODEL, resume_download=None) + class_mapping = model.config.id2label + + # Check for available device: CUDA, MPS (for Apple Silicon), or CPU + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + device = torch.device("mps") + else: + device = torch.device("cpu") + + model.to(device) + + def _classifier(texts, threshold=0.5, max_length=64): + tokens = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=max_length) + tokens = {key: val.to(device) for key, val in tokens.items()} # Move tokens to the selected device + + with torch.no_grad(): + output = model(**tokens) + + scores = output.logits.detach().cpu().numpy() + scores = expit(scores) + predictions = (scores >= threshold) * 1 + + batch_labels = [] + for prediction in predictions: + labels = [class_mapping[i] for i, value in enumerate(prediction) if value == 1] + batch_labels.append(labels) + + return batch_labels + + return _classifier + +@lru_cache() +def load_nltk_punkt(): + import nltk + try: + nltk.data.find('tokenizers/punkt') + except LookupError: + nltk.download('punkt') + return nltk.data.find('tokenizers/punkt') + +def download_all_models(remove_existing=False): + """Download all models required for Crawl4AI.""" + if remove_existing: + print("[LOG] Removing existing models...") + home_folder = get_home_folder() + model_folders = [ + os.path.join(home_folder, "models/reuters"), + os.path.join(home_folder, "models"), + ] + for folder in model_folders: + if Path(folder).exists(): + shutil.rmtree(folder) + print("[LOG] Existing models removed.") + + # Load each model to trigger download + print("[LOG] Downloading BERT Base Uncased...") + load_bert_base_uncased() + print("[LOG] Downloading BGE Small EN v1.5...") + load_bge_small_en_v1_5() + print("[LOG] Downloading text classifier...") + load_text_multilabel_classifier() + print("[LOG] Downloading custom NLTK Punkt model...") + load_nltk_punkt() + print("[LOG] βœ… All models downloaded successfully.") + +def main(): + print("[LOG] Welcome to the Crawl4AI Model Downloader!") + print("[LOG] This script will download all the models required for Crawl4AI.") + parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader") + parser.add_argument('--remove-existing', action='store_true', help="Remove existing models before downloading") + args = parser.parse_args() + + download_all_models(remove_existing=args.remove_existing) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/crawl4ai/models.py b/crawl4ai/models.py index b9373f78..c2c2d61e 100644 --- a/crawl4ai/models.py +++ b/crawl4ai/models.py @@ -11,5 +11,6 @@ class CrawlResult(BaseModel): success: bool cleaned_html: str = None markdown: str = None - parsed_json: str = None + extracted_content: str = None + metadata: dict = None error_message: str = None \ No newline at end of file diff --git a/crawl4ai/prompts.py b/crawl4ai/prompts.py index be7091bc..e0498ccc 100644 --- a/crawl4ai/prompts.py +++ b/crawl4ai/prompts.py @@ -59,7 +59,7 @@ Please provide your output within tags, like this: Remember, the output should be a complete, parsable JSON wrapped in tags, with no omissions or errors.
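As a quick illustration of how the new `model_loader` module is meant to be used, here is a minimal usage sketch (not part of the patch itself): it pre-fetches the models and then calls the cached multi-label classifier. The sample text and printed label are placeholders; the actual label names come from the `cardiffnlp/tweet-topic-21-multi` model config.

```python
# Minimal sketch for crawl4ai/model_loader.py, assuming the package is importable.
from crawl4ai.model_loader import download_all_models, load_text_multilabel_classifier

download_all_models()  # fetches BERT base, BGE small, the topic classifier and NLTK punkt
classify = load_text_multilabel_classifier()  # cached by lru_cache; returns a callable
labels = classify(["Stocks rallied after the quarterly earnings report."], threshold=0.5)
print(labels)  # e.g. [["business_&_entrepreneurs"]] -- labels depend on the model
```

The same download can be triggered from the command line through the `main()` entry point, for example `python -m crawl4ai.model_loader --remove-existing` (assuming the module is on the import path).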
The JSON objects should semantically break down the content into relevant blocks, maintaining the original order.""" -PROMPT_EXTRACT_BLOCKS = """YHere is the URL of the webpage: +PROMPT_EXTRACT_BLOCKS = """Here is the URL of the webpage: {URL} And here is the cleaned HTML content of that webpage: @@ -107,4 +107,61 @@ Please provide your output within tags, like this: }] +Remember, the output should be a complete, parsable JSON wrapped in tags, with no omissions or errors. The JSON objects should semantically break down the content into relevant blocks, maintaining the original order.""" + +PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION = """Here is the URL of the webpage: +{URL} + +And here is the cleaned HTML content of that webpage: + +{HTML} + + +Your task is to break down this HTML content into semantically relevant blocks, following the user's provided REQUEST, and for each block, generate a JSON object with the following keys: + +- index: an integer representing the index of the block in the content +- tags: a list containing ONE semantic tag that describes what the block is about +- content: a list of strings containing the text content of the block + +This is the user's REQUEST, pay attention to it: + +{REQUEST} + + +To generate the JSON objects: + +1. Carefully read through the HTML content and identify logical breaks or shifts in the content that would warrant splitting it into separate blocks. + +2. For each block: + a. Assign it an index based on its order in the content. + b. Analyze the content and generate ONE semantic tag that describes what the block is about. + c. Extract the text content, EXACTLY THE SAME AS THE GIVEN DATA, clean it up if needed, and store it as a list of strings in the "content" field. + +3. Ensure that the order of the JSON objects matches the order of the blocks as they appear in the original HTML content. + +4. Double-check that each JSON object includes all required keys (index, tags, content) and that the values are in the expected format (integer, list of strings, etc.). + +5. Make sure the generated JSON is complete and parsable, with no errors or omissions. + +6. Make sure to escape any special characters in the HTML content, as well as single or double quotes, to avoid JSON parsing issues. + +7. Never alter the extracted content, just copy and paste it as it is. + +Please provide your output within tags, like this: + + +[{ + "index": 0, + "tags": ["introduction"], + "content": ["This is the first paragraph of the article, which provides an introduction and overview of the main topic."] +}, +{ + "index": 1, + "tags": ["background"], + "content": ["This is the second paragraph, which delves into the history and background of the topic.", + "It provides context and sets the stage for the rest of the article."] +}] + + +**Make sure to follow the user's instruction so that the extracted blocks align with it.** + Remember, the output should be a complete, parsable JSON wrapped in tags, with no omissions or errors.
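To make the data flow around the new instruction-aware prompt concrete, here is a rough sketch of how its placeholders could be filled before the prompt is handed to an LLM. Using `str.replace` rather than `str.format` is an assumption: the prompt body contains literal braces in its JSON example, which `format` would not tolerate; the URL, HTML snippet and request text below are placeholders.

```python
# Hedged sketch: substituting the {URL}, {HTML} and {REQUEST} placeholders.
from crawl4ai.prompts import PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION

cleaned_html = "<div><p>Example article body...</p></div>"  # stand-in for the cleaned HTML
prompt = (
    PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION
    .replace("{URL}", "https://www.nbcnews.com/business")
    .replace("{HTML}", cleaned_html)
    .replace("{REQUEST}", "Extract only content related to technology")
)
```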
The JSON objects should semantically break down the content into relevant blocks, maintaining the original order.""" \ No newline at end of file diff --git a/crawl4ai/train.py b/crawl4ai/train.py new file mode 100644 index 00000000..f7e7c1a9 --- /dev/null +++ b/crawl4ai/train.py @@ -0,0 +1,146 @@ +import spacy +from spacy.training import Example +import random +import nltk +from nltk.corpus import reuters +import torch + +def save_spacy_model_as_torch(nlp, model_dir="models/reuters"): + # Extract the TextCategorizer component + textcat = nlp.get_pipe("textcat_multilabel") + + # Convert the weights to a PyTorch state dictionary + state_dict = {name: torch.tensor(param.data) for name, param in textcat.model.named_parameters()} + + # Save the state dictionary + torch.save(state_dict, f"{model_dir}/model_weights.pth") + + # Extract and save the vocabulary + vocab = extract_vocab(nlp) + with open(f"{model_dir}/vocab.txt", "w") as vocab_file: + for word, idx in vocab.items(): + vocab_file.write(f"{word}\t{idx}\n") + + print(f"Model weights and vocabulary saved to: {model_dir}") + +def extract_vocab(nlp): + # Extract vocabulary from the SpaCy model + vocab = {word: i for i, word in enumerate(nlp.vocab.strings)} + return vocab + +nlp = spacy.load("models/reuters") +save_spacy_model_as_torch(nlp, model_dir="models") + +def train_and_save_reuters_model(model_dir="models/reuters"): + # Ensure the Reuters corpus is downloaded + nltk.download('reuters') + nltk.download('punkt') + if not reuters.fileids(): + print("Reuters corpus not found.") + return + + # Load a blank English spaCy model + nlp = spacy.blank("en") + + # Create a TextCategorizer with the ensemble model for multi-label classification + textcat = nlp.add_pipe("textcat_multilabel") + + # Add labels to text classifier + for label in reuters.categories(): + textcat.add_label(label) + + # Prepare training data + train_examples = [] + for fileid in reuters.fileids(): + categories = reuters.categories(fileid) + text = reuters.raw(fileid) + cats = {label: label in categories for label in reuters.categories()} + # Prepare spacy Example objects + doc = nlp.make_doc(text) + example = Example.from_dict(doc, {'cats': cats}) + train_examples.append(example) + + # Initialize the text categorizer with the example objects + nlp.initialize(lambda: train_examples) + + # Train the model + random.seed(1) + spacy.util.fix_random_seed(1) + for i in range(5): # Adjust iterations for better accuracy + random.shuffle(train_examples) + losses = {} + # Create batches of data + batches = spacy.util.minibatch(train_examples, size=8) + for batch in batches: + nlp.update(batch, drop=0.2, losses=losses) + print(f"Losses at iteration {i}: {losses}") + + # Save the trained model + nlp.to_disk(model_dir) + print(f"Model saved to: {model_dir}") + +def train_model(model_dir, additional_epochs=0): + # Load the model if it exists, otherwise start with a blank model + try: + nlp = spacy.load(model_dir) + print("Model loaded from disk.") + except IOError: + print("No existing model found. 
Starting with a new model.") + nlp = spacy.blank("en") + textcat = nlp.add_pipe("textcat_multilabel") + for label in reuters.categories(): + textcat.add_label(label) + + # Prepare training data + train_examples = [] + for fileid in reuters.fileids(): + categories = reuters.categories(fileid) + text = reuters.raw(fileid) + cats = {label: label in categories for label in reuters.categories()} + doc = nlp.make_doc(text) + example = Example.from_dict(doc, {'cats': cats}) + train_examples.append(example) + + # Initialize the model if it was newly created + if 'textcat_multilabel' not in nlp.pipe_names: + nlp.initialize(lambda: train_examples) + else: + print("Continuing training with existing model.") + + # Train the model + random.seed(1) + spacy.util.fix_random_seed(1) + num_epochs = 5 + additional_epochs + for i in range(num_epochs): + random.shuffle(train_examples) + losses = {} + batches = spacy.util.minibatch(train_examples, size=8) + for batch in batches: + nlp.update(batch, drop=0.2, losses=losses) + print(f"Losses at iteration {i}: {losses}") + + # Save the trained model + nlp.to_disk(model_dir) + print(f"Model saved to: {model_dir}") + +def load_model_and_predict(model_dir, text, tok_k = 3): + # Load the trained model from the specified directory + nlp = spacy.load(model_dir) + + # Process the text with the loaded model + doc = nlp(text) + + # gee top 3 categories + top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:tok_k] + print(f"Top {tok_k} categories:") + + return top_categories + +if __name__ == "__main__": + train_and_save_reuters_model() + train_model("models/reuters", additional_epochs=5) + model_directory = "reuters_model_10" + print(reuters.categories()) + example_text = "Apple Inc. is reportedly buying a startup for $1 billion" + r =load_model_and_predict(model_directory, example_text) + print(r) \ No newline at end of file diff --git a/crawl4ai/utils.py b/crawl4ai/utils.py index eeb4c12a..cbeca812 100644 --- a/crawl4ai/utils.py +++ b/crawl4ai/utils.py @@ -1,16 +1,26 @@ -import requests +import time +from concurrent.futures import ThreadPoolExecutor, as_completed from bs4 import BeautifulSoup, Comment, element, Tag, NavigableString import html2text import json +import html import re import os -import litellm -from litellm import completion, batch_completion +from html2text import HTML2Text from .prompts import PROMPT_EXTRACT_BLOCKS from .config import * -import re -import html +from pathlib import Path +class InvalidCSSSelectorError(Exception): + pass + + +def get_home_folder(): + home_folder = os.path.join(Path.home(), ".crawl4ai") + os.makedirs(home_folder, exist_ok=True) + os.makedirs(f"{home_folder}/cache", exist_ok=True) + os.makedirs(f"{home_folder}/models", exist_ok=True) + return home_folder def beautify_html(escaped_html): """ @@ -77,7 +87,8 @@ def split_and_parse_json_objects(json_string): def sanitize_html(html): # Replace all weird and special characters with an empty string - sanitized_html = re.sub(r'[^\w\s.,;:!?=\[\]{}()<>\/\\\-"]', '', html) + sanitized_html = html + # sanitized_html = re.sub(r'[^\w\s.,;:!?=\[\]{}()<>\/\\\-"]', '', html) # Escape all double and single quotes sanitized_html = sanitized_html.replace('"', '\\"').replace("'", "\\'") @@ -113,14 +124,52 @@ def escape_json_string(s): return s +class CustomHTML2Text(HTML2Text): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.ignore_links = True + self.inside_pre = False + self.inside_code = False -def get_content_of_website(html, 
word_count_threshold = MIN_WORD_THRESHOLD): + def handle_tag(self, tag, attrs, start): + if tag == 'pre': + if start: + self.o('```\n') + self.inside_pre = True + else: + self.o('\n```') + self.inside_pre = False + # elif tag == 'code' and not self.inside_pre: + # if start: + # if not self.inside_pre: + # self.o('`') + # self.inside_code = True + # else: + # if not self.inside_pre: + # self.o('`') + # self.inside_code = False + + super().handle_tag(tag, attrs, start) + +def get_content_of_website(html, word_count_threshold = MIN_WORD_THRESHOLD, css_selector = None): try: + if not html: + return None # Parse HTML content with BeautifulSoup soup = BeautifulSoup(html, 'html.parser') # Get the content within the tag body = soup.body + + # If css_selector is provided, extract content based on the selector + if css_selector: + selected_elements = body.select(css_selector) + if not selected_elements: + raise InvalidCSSSelectorError(f"Invalid CSS selector , No elements found for CSS selector: {css_selector}") + div_tag = soup.new_tag('div') + for el in selected_elements: + div_tag.append(el) + body = div_tag # Remove script, style, and other tags that don't carry useful content from body for tag in body.find_all(['script', 'style', 'link', 'meta', 'noscript']): @@ -139,17 +188,28 @@ def get_content_of_website(html, word_count_threshold = MIN_WORD_THRESHOLD): else: img.decompose() + + # Create a function that replace content of all"pre" tage with its inner text + def replace_pre_tags_with_text(node): + for child in node.find_all('pre'): + # set child inner html to its text + child.string = child.get_text() + return node + + # Replace all "pre" tags with their inner text + body = replace_pre_tags_with_text(body) + # Recursively remove empty elements, their parent elements, and elements with word count below threshold - def remove_empty_and_low_word_count_elements(node): + def remove_empty_and_low_word_count_elements(node, word_count_threshold): for child in node.contents: if isinstance(child, element.Tag): - remove_empty_and_low_word_count_elements(child) + remove_empty_and_low_word_count_elements(child, word_count_threshold) word_count = len(child.get_text(strip=True).split()) if (len(child.contents) == 0 and not child.get_text(strip=True)) or word_count < word_count_threshold: child.decompose() return node - body = remove_empty_and_low_word_count_elements(body) + body = remove_empty_and_low_word_count_elements(body, word_count_threshold) def remove_small_text_tags(body: Tag, word_count_threshold: int = MIN_WORD_THRESHOLD): # We'll use a list to collect all tags that don't meet the word count requirement @@ -214,9 +274,11 @@ def get_content_of_website(html, word_count_threshold = MIN_WORD_THRESHOLD): return node body = flatten_nested_elements(body) + + # Remove comments - for comment in soup.find_all(text=lambda text: isinstance(text, Comment)): + for comment in soup.find_all(string=lambda text: isinstance(text, Comment)): comment.extract() # Remove consecutive empty newlines and replace multiple spaces with a single space @@ -228,9 +290,11 @@ def get_content_of_website(html, word_count_threshold = MIN_WORD_THRESHOLD): # Convert cleaned HTML to Markdown h = html2text.HTML2Text() + h = CustomHTML2Text() h.ignore_links = True markdown = h.handle(cleaned_html) - + markdown = markdown.replace(' ```', '```') + # Return the Markdown content return{ 'markdown': markdown, @@ -240,13 +304,7 @@ def get_content_of_website(html, word_count_threshold = MIN_WORD_THRESHOLD): except Exception as e: print('Error 
processing HTML content:', str(e)) - return None - -# Example usage -# word_count_threshold = 5 # Adjust this value according to your desired threshold -# markdown_content = get_content_of_website(word_count_threshold) -# print(markdown_content) - + raise InvalidCSSSelectorError(f"Invalid CSS selector: {css_selector}") from e def extract_xml_tags(string): tags = re.findall(r'<(\w+)>', string) @@ -265,17 +323,16 @@ def extract_xml_data(tags, string): return data -import time -import litellm - # Function to perform the completion with exponential backoff def perform_completion_with_backoff(provider, prompt_with_variables, api_token): + from litellm import completion + from litellm.exceptions import RateLimitError max_attempts = 3 base_delay = 2 # Base delay in seconds, you can adjust this based on your needs for attempt in range(max_attempts): try: - response = completion( + response =completion( model=provider, messages=[ {"role": "user", "content": prompt_with_variables} @@ -284,7 +341,7 @@ def perform_completion_with_backoff(provider, prompt_with_variables, api_token): api_key=api_token ) return response # Return the successful response - except litellm.exceptions.RateLimitError as e: + except RateLimitError as e: print("Rate limit error:", str(e)) # Check if we have exhausted our max attempts @@ -318,23 +375,6 @@ def extract_blocks(url, html, provider = DEFAULT_PROVIDER, api_token = None): response = perform_completion_with_backoff(provider, prompt_with_variables, api_token) - # try: - # response = completion( - # model = provider, - # messages = [ - # {"role": "user", "content": prompt_with_variables} - # ], - # temperature = 0.01, - # api_key = api_token - # ) - # except litellm.exceptions.RateLimitError as e: - # print("Rate limit error:", str(e)) - # return [{ - # "index": 0, - # "tags": ["error"], - # "content": ["Rate limit error. Please try again later."] - # }] - try: blocks = extract_xml_data(["blocks"], response.choices[0].message.content)['blocks'] blocks = json.loads(blocks) @@ -357,7 +397,7 @@ def extract_blocks(url, html, provider = DEFAULT_PROVIDER, api_token = None): def extract_blocks_batch(batch_data, provider = "groq/llama3-70b-8192", api_token = None): api_token = os.getenv('GROQ_API_KEY', None) if not api_token else api_token - + from litellm import batch_completion messages = [] for url, html in batch_data: @@ -397,4 +437,50 @@ def extract_blocks_batch(batch_data, provider = "groq/llama3-70b-8192", api_toke }] all_blocks.append(blocks) - return sum(all_blocks, []) \ No newline at end of file + return sum(all_blocks, []) + + +def merge_chunks_based_on_token_threshold(chunks, token_threshold): + """ + Merges small chunks into larger ones based on the total token threshold. + + :param chunks: List of text chunks to be merged based on token count. + :param token_threshold: Max number of tokens for each merged chunk. + :return: List of merged text chunks. 
+ """ + merged_sections = [] + current_chunk = [] + total_token_so_far = 0 + + for chunk in chunks: + chunk_token_count = len(chunk.split()) * 1.3 # Estimate token count with a factor + if total_token_so_far + chunk_token_count < token_threshold: + current_chunk.append(chunk) + total_token_so_far += chunk_token_count + else: + if current_chunk: + merged_sections.append('\n\n'.join(current_chunk)) + current_chunk = [chunk] + total_token_so_far = chunk_token_count + + # Add the last chunk if it exists + if current_chunk: + merged_sections.append('\n\n'.join(current_chunk)) + + return merged_sections + +def process_sections(url: str, sections: list, provider: str, api_token: str) -> list: + extracted_content = [] + if provider.startswith("groq/"): + # Sequential processing with a delay + for section in sections: + extracted_content.extend(extract_blocks(url, section, provider, api_token)) + time.sleep(0.5) # 500 ms delay between each processing + else: + # Parallel processing using ThreadPoolExecutor + with ThreadPoolExecutor() as executor: + futures = [executor.submit(extract_blocks, url, section, provider, api_token) for section in sections] + for future in as_completed(futures): + extracted_content.extend(future.result()) + + return extracted_content \ No newline at end of file diff --git a/crawl4ai/web_crawler.py b/crawl4ai/web_crawler.py index 7385a7d7..564f64f0 100644 --- a/crawl4ai/web_crawler.py +++ b/crawl4ai/web_crawler.py @@ -1,135 +1,209 @@ -import asyncio import os, time -import json -from selenium import webdriver -from selenium.webdriver.chrome.service import Service -from selenium.webdriver.common.by import By -from selenium.webdriver.support.ui import WebDriverWait -from selenium.webdriver.support import expected_conditions as EC -from selenium.webdriver.chrome.options import Options -import chromedriver_autoinstaller -from pydantic import parse_obj_as +os.environ["TOKENIZERS_PARALLELISM"] = "false" +from pathlib import Path + from .models import UrlModel, CrawlResult -from .database import init_db, get_cached_url, cache_url +from .database import init_db, get_cached_url, cache_url, DB_PATH, flush_db from .utils import * +from .chunking_strategy import * +from .extraction_strategy import * +from .crawler_strategy import * from typing import List -from concurrent.futures import ThreadPoolExecutor, as_completed -from .config import * +from concurrent.futures import ThreadPoolExecutor +from .config import * + class WebCrawler: - def __init__(self, db_path: str): - self.db_path = db_path - init_db(self.db_path) - self.options = Options() - self.options.headless = True - self.options.add_argument("--no-sandbox") - self.options.add_argument("--disable-dev-shm-usage") - # make it headless - self.options.add_argument("--headless") + def __init__( + self, + # db_path: str = None, + crawler_strategy: CrawlerStrategy = None, + always_by_pass_cache: bool = False, + ): + # self.db_path = db_path + self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy() + self.always_by_pass_cache = always_by_pass_cache - # Automatically install or update chromedriver - chromedriver_autoinstaller.install() - + # Create the .crawl4ai folder in the user's home directory if it doesn't exist + self.crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai") + os.makedirs(self.crawl4ai_folder, exist_ok=True) + os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True) - def fetch_page(self, url_model: UrlModel, provider: str = DEFAULT_PROVIDER, api_token: str = None, extract_blocks_flag: bool = 
True, word_count_threshold = MIN_WORD_THRESHOLD) -> CrawlResult: + # If db_path is not provided, use the default path + # if not db_path: + # self.db_path = f"{self.crawl4ai_folder}/crawl4ai.db" + + # flush_db() + init_db() + + self.ready = False + + def warmup(self): + print("[LOG] 🌀️ Warming up the WebCrawler") + result = self.run( + url='https://crawl4ai.uccode.io/', + word_count_threshold=5, + extraction_strategy= NoExtractionStrategy(), + bypass_cache=False, + verbose = False + ) + self.ready = True + print("[LOG] 🌞 WebCrawler is ready to crawl") + + + def fetch_page( + self, + url_model: UrlModel, + provider: str = DEFAULT_PROVIDER, + api_token: str = None, + extract_blocks_flag: bool = True, + word_count_threshold=MIN_WORD_THRESHOLD, + use_cached_html: bool = False, + extraction_strategy: ExtractionStrategy = None, + chunking_strategy: ChunkingStrategy = RegexChunking(), + **kwargs, + ) -> CrawlResult: + return self.run( + url_model.url, + word_count_threshold, + extraction_strategy or NoExtractionStrategy(), + chunking_strategy, + bypass_cache=url_model.forced, + **kwargs, + ) + pass + + + def run( + self, + url: str, + word_count_threshold=MIN_WORD_THRESHOLD, + extraction_strategy: ExtractionStrategy = None, + chunking_strategy: ChunkingStrategy = RegexChunking(), + bypass_cache: bool = False, + css_selector: str = None, + verbose=True, + **kwargs, + ) -> CrawlResult: + extraction_strategy = extraction_strategy or NoExtractionStrategy() + extraction_strategy.verbose = verbose + # Check if extraction strategy is an instance of ExtractionStrategy if not raise an error + if not isinstance(extraction_strategy, ExtractionStrategy): + raise ValueError("Unsupported extraction strategy") + if not isinstance(chunking_strategy, ChunkingStrategy): + raise ValueError("Unsupported chunking strategy") + # make sure word_count_threshold is not lesser than MIN_WORD_THRESHOLD if word_count_threshold < MIN_WORD_THRESHOLD: word_count_threshold = MIN_WORD_THRESHOLD - + # Check cache first - cached = get_cached_url(self.db_path, str(url_model.url)) - if cached and not url_model.forced: - return CrawlResult(**{ - "url": cached[0], - "html": cached[1], - "cleaned_html": cached[2], - "markdown": cached[3], - "parsed_json": cached[4], - "success": cached[5], - "error_message": "" - }) - + if not bypass_cache and not self.always_by_pass_cache: + cached = get_cached_url(url) + if cached: + return CrawlResult( + **{ + "url": cached[0], + "html": cached[1], + "cleaned_html": cached[2], + "markdown": cached[3], + "extracted_content": cached[4], + "success": cached[5], + "error_message": "", + } + ) # Initialize WebDriver for crawling - service = Service(chromedriver_autoinstaller.install()) - driver = webdriver.Chrome(service=service, options=self.options) - - try: - driver.get(str(url_model.url)) - WebDriverWait(driver, 10).until( - EC.presence_of_all_elements_located((By.TAG_NAME, "html")) - ) - html = driver.page_source - success = True - error_message = "" - except Exception as e: - html = "" - success = False - error_message = str(e) - finally: - driver.quit() - + t = time.time() + html = self.crawler_strategy.crawl(url) + success = True + error_message = "" # Extract content from HTML - result = get_content_of_website(html, word_count_threshold) - cleaned_html = result.get('cleaned_html', html) - markdown = result.get('markdown', "") + try: + result = get_content_of_website(html, word_count_threshold, css_selector=css_selector) + if result is None: + raise ValueError(f"Failed to extract content from the 
website: {url}") + except InvalidCSSSelectorError as e: + raise ValueError(str(e)) - print("Crawling is done πŸš€") + cleaned_html = result.get("cleaned_html", html) + markdown = result.get("markdown", "") - parsed_json = [] - if extract_blocks_flag: - # Split markdown into sections - paragraphs = markdown.split('\n\n') - sections = [] - chunks = [] - total_token_so_far = 0 + # Print a profession LOG style message, show time taken and say crawling is done + if verbose: + print( + f"[LOG] πŸš€ Crawling done for {url}, success: {success}, time taken: {time.time() - t} seconds" + ) - for paragraph in paragraphs: - if total_token_so_far < CHUNK_TOKEN_THRESHOLD: - chunk = paragraph.split(' ') - total_token_so_far += len(chunk) * 1.3 - chunks.append(paragraph) - else: - sections.append('\n\n'.join(chunks)) - chunks = [paragraph] - total_token_so_far = len(paragraph.split(' ')) * 1.3 + extracted_content = [] + if verbose: + print(f"[LOG] πŸ”₯ Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}") + t = time.time() + # Split markdown into sections + sections = chunking_strategy.chunk(markdown) + # sections = merge_chunks_based_on_token_threshold(sections, CHUNK_TOKEN_THRESHOLD) - if chunks: - sections.append('\n\n'.join(chunks)) + extracted_content = extraction_strategy.run( + url, sections, + ) + extracted_content = json.dumps(extracted_content) - # Process sections to extract blocks - parsed_json = [] - if provider.startswith("groq/"): - # Sequential processing with a delay - for section in sections: - parsed_json.extend(extract_blocks(str(url_model.url), section, provider, api_token)) - time.sleep(0.5) # 500 ms delay between each processing - else: - # Parallel processing using ThreadPoolExecutor - with ThreadPoolExecutor() as executor: - futures = [executor.submit(extract_blocks, str(url_model.url), section, provider, api_token) for section in sections] - for future in as_completed(futures): - parsed_json.extend(future.result()) - - parsed_json = json.dumps(parsed_json) - else: - parsed_json = "{}" + if verbose: + print( + f"[LOG] πŸš€ Extraction done for {url}, time taken: {time.time() - t} seconds." 
+ ) # Cache the result cleaned_html = beautify_html(cleaned_html) - cache_url(self.db_path, str(url_model.url), html, cleaned_html, markdown, parsed_json, success) - - return CrawlResult( - url=str(url_model.url), - html=html, - cleaned_html=cleaned_html, - markdown=markdown, - parsed_json=parsed_json, - success=success, - error_message=error_message + cache_url( + url, + html, + cleaned_html, + markdown, + extracted_content, + success, ) - def fetch_pages(self, url_models: List[UrlModel], provider: str = DEFAULT_PROVIDER, api_token: str = None) -> List[CrawlResult]: + return CrawlResult( + url=url, + html=html, + cleaned_html=cleaned_html, + markdown=markdown, + extracted_content=extracted_content, + success=success, + error_message=error_message, + ) + + def fetch_pages( + self, + url_models: List[UrlModel], + provider: str = DEFAULT_PROVIDER, + api_token: str = None, + extract_blocks_flag: bool = True, + word_count_threshold=MIN_WORD_THRESHOLD, + use_cached_html: bool = False, + extraction_strategy: ExtractionStrategy = None, + chunking_strategy: ChunkingStrategy = RegexChunking(), + **kwargs, + ) -> List[CrawlResult]: + extraction_strategy = extraction_strategy or NoExtractionStrategy() + def fetch_page_wrapper(url_model, *args, **kwargs): + return self.fetch_page(url_model, *args, **kwargs) + with ThreadPoolExecutor() as executor: - results = list(executor.map(self.fetch_page, url_models, [provider] * len(url_models), [api_token] * len(url_models))) - return results \ No newline at end of file + results = list( + executor.map( + fetch_page_wrapper, + url_models, + [provider] * len(url_models), + [api_token] * len(url_models), + [extract_blocks_flag] * len(url_models), + [word_count_threshold] * len(url_models), + [use_cached_html] * len(url_models), + [extraction_strategy] * len(url_models), + [chunking_strategy] * len(url_models), + *[kwargs] * len(url_models), + ) + ) + + return results diff --git a/docs/chunking_strategies.json b/docs/chunking_strategies.json new file mode 100644 index 00000000..b0d2a6bc --- /dev/null +++ b/docs/chunking_strategies.json @@ -0,0 +1,12 @@ +{ + "RegexChunking": "### RegexChunking\n\n`RegexChunking` is a text chunking strategy that splits a given text into smaller parts using regular expressions.\nThis is useful for preparing large texts for processing by language models, ensuring they are divided into manageable segments.\n\n#### Constructor Parameters:\n- `patterns` (list, optional): A list of regular expression patterns used to split the text. Default is to split by double newlines (`['\\n\\n']`).\n\n#### Example usage:\n```python\nchunker = RegexChunking(patterns=[r'\\n\\n', r'\\. '])\nchunks = chunker.chunk(\"This is a sample text. It will be split into chunks.\")\n```", + + "NlpSentenceChunking": "### NlpSentenceChunking\n\n`NlpSentenceChunking` uses a natural language processing model to chunk a given text into sentences. This approach leverages SpaCy to accurately split text based on sentence boundaries.\n\n#### Constructor Parameters:\n- None.\n\n#### Example usage:\n```python\nchunker = NlpSentenceChunking()\nchunks = chunker.chunk(\"This is a sample text. It will be split into sentences.\")\n```", + + "TopicSegmentationChunking": "### TopicSegmentationChunking\n\n`TopicSegmentationChunking` uses the TextTiling algorithm to segment a given text into topic-based chunks. 
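Putting the reworked crawler API together, the following is a small consolidated sketch (the URL and selector are placeholders, not part of the patch) showing a `run()` call with a chunking strategy and CSS selector, and how `extracted_content` now comes back as a JSON string:

```python
import json
from crawl4ai.web_crawler import WebCrawler
from crawl4ai.chunking_strategy import RegexChunking

crawler = WebCrawler()
crawler.warmup()  # optional warm-up crawl so the first real call is fast
result = crawler.run(
    url="https://www.nbcnews.com/business",
    word_count_threshold=10,
    chunking_strategy=RegexChunking(patterns=[r"\n\n"]),
    css_selector="p",
    bypass_cache=True,
)
blocks = json.loads(result.extracted_content)  # run() serializes extracted blocks to JSON
print(f"{len(blocks)} blocks, markdown preview: {result.markdown[:80]}")
```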
This method identifies thematic boundaries in the text.\n\n#### Constructor Parameters:\n- `num_keywords` (int, optional): The number of keywords to extract for each topic segment. Default is `3`.\n\n#### Example usage:\n```python\nchunker = TopicSegmentationChunking(num_keywords=3)\nchunks = chunker.chunk(\"This is a sample text. It will be split into topic-based segments.\")\n```", + + "FixedLengthWordChunking": "### FixedLengthWordChunking\n\n`FixedLengthWordChunking` splits a given text into chunks of fixed length, based on the number of words.\n\n#### Constructor Parameters:\n- `chunk_size` (int, optional): The number of words in each chunk. Default is `100`.\n\n#### Example usage:\n```python\nchunker = FixedLengthWordChunking(chunk_size=100)\nchunks = chunker.chunk(\"This is a sample text. It will be split into fixed-length word chunks.\")\n```", + + "SlidingWindowChunking": "### SlidingWindowChunking\n\n`SlidingWindowChunking` uses a sliding window approach to chunk a given text. Each chunk has a fixed length, and the window slides by a specified step size.\n\n#### Constructor Parameters:\n- `window_size` (int, optional): The number of words in each chunk. Default is `100`.\n- `step` (int, optional): The number of words to slide the window. Default is `50`.\n\n#### Example usage:\n```python\nchunker = SlidingWindowChunking(window_size=100, step=50)\nchunks = chunker.chunk(\"This is a sample text. It will be split using a sliding window approach.\")\n```" + } + \ No newline at end of file diff --git a/docs/examples/quickstart.py b/docs/examples/quickstart.py new file mode 100644 index 00000000..73772c25 --- /dev/null +++ b/docs/examples/quickstart.py @@ -0,0 +1,188 @@ +import os +import time +from crawl4ai.web_crawler import WebCrawler +from crawl4ai.chunking_strategy import * +from crawl4ai.extraction_strategy import * +from crawl4ai.crawler_strategy import * +from rich import print +from rich.console import Console +from functools import lru_cache + +console = Console() + +@lru_cache() +def create_crawler(): + crawler = WebCrawler() + crawler.warmup() + return crawler + +def print_result(result): + # Print each key in one line and just the first 10 characters of each one's value and three dots + console.print(f"\t[bold]Result:[/bold]") + for key, value in result.model_dump().items(): + if isinstance(value, str) and value: + console.print(f"\t{key}: [green]{value[:20]}...[/green]") + if result.extracted_content: + items = json.loads(result.extracted_content) + print(f"\t[bold]{len(items)} blocks is extracted![/bold]") + + +def cprint(message, press_any_key=False): + console.print(message) + if press_any_key: + console.print("Press any key to continue...", style="") + input() + +def basic_usage(crawler): + cprint("πŸ› οΈ [bold cyan]Basic Usage: Simply provide a URL and let Crawl4ai do the magic![/bold cyan]") + result = crawler.run(url="https://www.nbcnews.com/business") + cprint("[LOG] πŸ“¦ [bold yellow]Basic crawl result:[/bold yellow]") + print_result(result) + +def understanding_parameters(crawler): + cprint("\n🧠 [bold cyan]Understanding 'bypass_cache' and 'include_raw_html' parameters:[/bold cyan]") + cprint("By default, Crawl4ai caches the results of your crawls. This means that subsequent crawls of the same URL will be much faster! 
Let's see this in action.") + + # First crawl (reads from cache) + cprint("1️⃣ First crawl (caches the result):", True) + start_time = time.time() + result = crawler.run(url="https://www.nbcnews.com/business") + end_time = time.time() + cprint(f"[LOG] πŸ“¦ [bold yellow]First crawl took {end_time - start_time} seconds and result (from cache):[/bold yellow]") + print_result(result) + + # Force to crawl again + cprint("2️⃣ Second crawl (Force to crawl again):", True) + start_time = time.time() + result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True) + end_time = time.time() + cprint(f"[LOG] πŸ“¦ [bold yellow]Second crawl took {end_time - start_time} seconds and result (forced to crawl):[/bold yellow]") + print_result(result) + +def add_chunking_strategy(crawler): + # Adding a chunking strategy: RegexChunking + cprint("\n🧩 [bold cyan]Let's add a chunking strategy: RegexChunking![/bold cyan]", True) + cprint("RegexChunking is a simple chunking strategy that splits the text based on a given regex pattern. Let's see it in action!") + result = crawler.run( + url="https://www.nbcnews.com/business", + chunking_strategy=RegexChunking(patterns=["\n\n"]) + ) + cprint("[LOG] πŸ“¦ [bold yellow]RegexChunking result:[/bold yellow]") + print_result(result) + + # Adding another chunking strategy: NlpSentenceChunking + cprint("\nπŸ” [bold cyan]Time to explore another chunking strategy: NlpSentenceChunking![/bold cyan]", True) + cprint("NlpSentenceChunking uses NLP techniques to split the text into sentences. Let's see how it performs!") + result = crawler.run( + url="https://www.nbcnews.com/business", + chunking_strategy=NlpSentenceChunking() + ) + cprint("[LOG] πŸ“¦ [bold yellow]NlpSentenceChunking result:[/bold yellow]") + print_result(result) + +def add_extraction_strategy(crawler): + # Adding an extraction strategy: CosineStrategy + cprint("\n🧠 [bold cyan]Let's get smarter with an extraction strategy: CosineStrategy![/bold cyan]", True) + cprint("CosineStrategy uses cosine similarity to extract semantically similar blocks of text. Let's see it in action!") + result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3) + ) + cprint("[LOG] πŸ“¦ [bold yellow]CosineStrategy result:[/bold yellow]") + print_result(result) + + # Using semantic_filter with CosineStrategy + cprint("You can pass other parameters like 'semantic_filter' to the CosineStrategy to extract semantically similar blocks of text. Let's see it in action!") + result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=CosineStrategy( + semantic_filter="inflation rent prices", + ) + ) + cprint("[LOG] πŸ“¦ [bold yellow]CosineStrategy result with semantic filter:[/bold yellow]") + print_result(result) + +def add_llm_extraction_strategy(crawler): + # Adding an LLM extraction strategy without instructions + cprint("\nπŸ€– [bold cyan]Time to bring in the big guns: LLMExtractionStrategy without instructions![/bold cyan]", True) + cprint("LLMExtractionStrategy uses a large language model to extract relevant information from the web page. 
Let's see it in action!") + result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')) + ) + cprint("[LOG] πŸ“¦ [bold yellow]LLMExtractionStrategy (no instructions) result:[/bold yellow]") + print_result(result) + + # Adding an LLM extraction strategy with instructions + cprint("\nπŸ“œ [bold cyan]Let's make it even more interesting: LLMExtractionStrategy with instructions![/bold cyan]", True) + cprint("Let's say we are only interested in financial news. Let's see how LLMExtractionStrategy performs with instructions!") + result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy( + provider="openai/gpt-4o", + api_token=os.getenv('OPENAI_API_KEY'), + instruction="I am interested in only financial news" + ) + ) + cprint("[LOG] πŸ“¦ [bold yellow]LLMExtractionStrategy (with instructions) result:[/bold yellow]") + print_result(result) + + result = crawler.run( + url="https://www.nbcnews.com/business", + extraction_strategy=LLMExtractionStrategy( + provider="openai/gpt-4o", + api_token=os.getenv('OPENAI_API_KEY'), + instruction="Extract only content related to technology" + ) + ) + cprint("[LOG] πŸ“¦ [bold yellow]LLMExtractionStrategy (with technology instruction) result:[/bold yellow]") + print_result(result) + +def targeted_extraction(crawler): + # Using a CSS selector to extract only H2 tags + cprint("\n🎯 [bold cyan]Targeted extraction: Let's use a CSS selector to extract only H2 tags![/bold cyan]", True) + result = crawler.run( + url="https://www.nbcnews.com/business", + css_selector="h2" + ) + cprint("[LOG] πŸ“¦ [bold yellow]CSS Selector (H2 tags) result:[/bold yellow]") + print_result(result) + +def interactive_extraction(crawler): + # Passing JavaScript code to interact with the page + cprint("\nπŸ–±οΈ [bold cyan]Let's get interactive: Passing JavaScript code to click 'Load More' button![/bold cyan]", True) + cprint("In this example we try to click the 'Load More' button on the page using JavaScript code.") + js_code = """ + const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); + loadMoreButton && loadMoreButton.click(); + """ + crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code) + crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True) + result = crawler.run( + url="https://www.nbcnews.com/business", + ) + cprint("[LOG] πŸ“¦ [bold yellow]JavaScript Code (Load More button) result:[/bold yellow]") + print_result(result) + +def main(): + cprint("🌟 [bold green]Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! 🌐[/bold green]") + cprint("⛳️ [bold cyan]First Step: Create an instance of WebCrawler and call the `warmup()` function.[/bold cyan]") + cprint("If this is the first time you're running Crawl4ai, this might take a few seconds to load required model files.") + + crawler = create_crawler() + + basic_usage(crawler) + understanding_parameters(crawler) + + crawler.always_by_pass_cache = True + add_chunking_strategy(crawler) + add_extraction_strategy(crawler) + add_llm_extraction_strategy(crawler) + targeted_extraction(crawler) + interactive_extraction(crawler) + + cprint("\nπŸŽ‰ [bold green]Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl the web like a pro! 
πŸ•ΈοΈ[/bold green]") + +if __name__ == "__main__": + main() + diff --git a/docs/extraction_strategies.json b/docs/extraction_strategies.json new file mode 100644 index 00000000..570e1e32 --- /dev/null +++ b/docs/extraction_strategies.json @@ -0,0 +1,10 @@ +{ + "NoExtractionStrategy": "### NoExtractionStrategy\n\n`NoExtractionStrategy` is a basic extraction strategy that returns the entire HTML content without any modification. It is useful for cases where no specific extraction is required. Only clean html, and amrkdown.\n\n#### Constructor Parameters:\nNone.\n\n#### Example usage:\n```python\nextractor = NoExtractionStrategy()\nextracted_content = extractor.extract(url, html)\n```", + + "LLMExtractionStrategy": "### LLMExtractionStrategy\n\n`LLMExtractionStrategy` uses a Language Model (LLM) to extract meaningful blocks or chunks from the given HTML content. This strategy leverages an external provider for language model completions.\n\n#### Constructor Parameters:\n- `provider` (str, optional): The provider to use for the language model completions. Default is `DEFAULT_PROVIDER` (e.g., openai/gpt-4).\n- `api_token` (str, optional): The API token for the provider. If not provided, it will try to load from the environment variable `OPENAI_API_KEY`.\n- `instruction` (str, optional): An instruction to guide the LLM on how to perform the extraction. This allows users to specify the type of data they are interested in or set the tone of the response. Default is `None`.\n\n#### Example usage:\n```python\nextractor = LLMExtractionStrategy(provider='openai', api_token='your_api_token', instruction='Extract only news about AI.')\nextracted_content = extractor.extract(url, html)\n```\n\nBy providing clear instructions, users can tailor the extraction process to their specific needs, enhancing the relevance and utility of the extracted content.", + + "CosineStrategy": "### CosineStrategy\n\n`CosineStrategy` uses hierarchical clustering based on cosine similarity to extract clusters of text from the given HTML content. This strategy is suitable for identifying related content sections.\n\n#### Constructor Parameters:\n- `semantic_filter` (str, optional): A string containing keywords for filtering relevant documents before clustering. If provided, documents are filtered based on their cosine similarity to the keyword filter embedding. Default is `None`.\n- `word_count_threshold` (int, optional): Minimum number of words per cluster. Default is `20`.\n- `max_dist` (float, optional): The maximum cophenetic distance on the dendrogram to form clusters. Default is `0.2`.\n- `linkage_method` (str, optional): The linkage method for hierarchical clustering. Default is `'ward'`.\n- `top_k` (int, optional): Number of top categories to extract. Default is `3`.\n- `model_name` (str, optional): The model name for embedding generation. 
Default is `'BAAI/bge-small-en-v1.5'`.\n\n#### Example usage:\n```python\nextractor = CosineStrategy(semantic_filter='artificial intelligence', word_count_threshold=10, max_dist=0.2, linkage_method='ward', top_k=3, model_name='BAAI/bge-small-en-v1.5')\nextracted_content = extractor.extract(url, html)\n```\n\n#### Cosine Similarity Filtering\n\nWhen a `semantic_filter` is provided, the `CosineStrategy` applies an embedding-based filtering process to select relevant documents before performing hierarchical clustering.", + + "TopicExtractionStrategy": "### TopicExtractionStrategy\n\n`TopicExtractionStrategy` uses the TextTiling algorithm to segment the HTML content into topics and extracts keywords for each segment. This strategy is useful for identifying and summarizing thematic content.\n\n#### Constructor Parameters:\n- `num_keywords` (int, optional): Number of keywords to represent each topic segment. Default is `3`.\n\n#### Example usage:\n```python\nextractor = TopicExtractionStrategy(num_keywords=3)\nextracted_content = extractor.extract(url, html)\n```" + } + \ No newline at end of file diff --git a/examples/quickstart.py b/examples/quickstart.py deleted file mode 100644 index 17ff8464..00000000 --- a/examples/quickstart.py +++ /dev/null @@ -1,31 +0,0 @@ -from crawl4ai.web_crawler import WebCrawler -from crawl4ai.models import UrlModel -from crawl4ai.utils import get_content_of_website -import os - -def main(): - # Initialize the WebCrawler with just the database path - crawler = WebCrawler(db_path='crawler_data.db') - - # Fetch a single page - single_url = UrlModel(url='https://www.nbcnews.com/business', forced=False) - result = crawler.fetch_page( - single_url, - provider= "openai/gpt-3.5-turbo", - api_token = os.getenv('OPENAI_API_KEY'), - extract_blocks_flag=True, - word_count_threshold=10 - ) - print(result.model_dump()) - - # Fetch multiple pages - # urls = [ - # UrlModel(url='http://example.com', forced=False), - # UrlModel(url='http://example.org', forced=False) - # ] - # results = crawler.fetch_pages(urls, provider= "openai/gpt-4-turbo", api_token = os.getenv('OPENAI_API_KEY')) - # for res in results: - # print(res.model_copy()) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/main.py b/main.py index 71b43c36..45e37515 100644 --- a/main.py +++ b/main.py @@ -1,24 +1,20 @@ -from fastapi import FastAPI, HTTPException, Request -from fastapi.responses import HTMLResponse -from fastapi.staticfiles import StaticFiles -from fastapi.responses import JSONResponse -from pydantic import BaseModel, HttpUrl -from typing import List, Optional -from crawl4ai.web_crawler import WebCrawler -from crawl4ai.models import UrlModel -import asyncio -from concurrent.futures import ThreadPoolExecutor, as_completed -import chromedriver_autoinstaller -from functools import lru_cache -from crawl4ai.database import get_total_count, clear_db import os -import uuid -# Import the CORS middleware +import importlib +import asyncio +from functools import lru_cache + +from fastapi import FastAPI, HTTPException, Request +from fastapi.responses import HTMLResponse, JSONResponse +from fastapi.staticfiles import StaticFiles from fastapi.middleware.cors import CORSMiddleware +from fastapi.templating import Jinja2Templates +from pydantic import BaseModel, HttpUrl +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import List, Optional -# Task management -tasks = {} +from crawl4ai.web_crawler import WebCrawler +from crawl4ai.database import get_total_count, 
clear_db # Configuration __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) @@ -40,134 +36,113 @@ app.add_middleware( # Mount the pages directory as a static directory app.mount("/pages", StaticFiles(directory=__location__ + "/pages"), name="pages") - - -chromedriver_autoinstaller.install() # Ensure chromedriver is installed - -class UrlsInput(BaseModel): - urls: List[HttpUrl] - provider_model: str - api_token: str - include_raw_html: Optional[bool] = False - forced: bool = False - extract_blocks: bool = True - word_count_threshold: Optional[int] = 5 - +templates = Jinja2Templates(directory=__location__ + "/pages") +# chromedriver_autoinstaller.install() # Ensure chromedriver is installed @lru_cache() def get_crawler(): # Initialize and return a WebCrawler instance - return WebCrawler(db_path='crawler_data.db') + return WebCrawler() + +class CrawlRequest(BaseModel): + urls: List[str] + include_raw_html: Optional[bool] = False + bypass_cache: bool = False + extract_blocks: bool = True + word_count_threshold: Optional[int] = 5 + extraction_strategy: Optional[str] = "NoExtractionStrategy" + extraction_strategy_args: Optional[dict] = {} + chunking_strategy: Optional[str] = "RegexChunking" + chunking_strategy_args: Optional[dict] = {} + css_selector: Optional[str] = None + verbose: Optional[bool] = True + @app.get("/", response_class=HTMLResponse) -async def read_index(): - with open(f"{__location__}/pages/index.html", "r") as file: - html_content = file.read() - return HTMLResponse(content=html_content, status_code=200) +async def read_index(request: Request): + partials_dir = os.path.join(__location__, "pages", "partial") + partials = {} + + for filename in os.listdir(partials_dir): + if filename.endswith(".html"): + with open(os.path.join(partials_dir, filename), "r") as file: + partials[filename[:-5]] = file.read() + + return templates.TemplateResponse("index.html", {"request": request, **partials}) @app.get("/total-count") async def get_total_url_count(): - count = get_total_count(db_path='crawler_data.db') + count = get_total_count() return JSONResponse(content={"count": count}) # Add endpoit to clear db @app.get("/clear-db") async def clear_database(): - clear_db(db_path='crawler_data.db') + clear_db() return JSONResponse(content={"message": "Database cleared."}) +def import_strategy(module_name: str, class_name: str, *args, **kwargs): + try: + module = importlib.import_module(module_name) + strategy_class = getattr(module, class_name) + return strategy_class(*args, **kwargs) + except ImportError: + raise HTTPException(status_code=400, detail=f"Module {module_name} not found.") + except AttributeError: + raise HTTPException(status_code=400, detail=f"Class {class_name} not found in {module_name}.") + @app.post("/crawl") -async def crawl_urls(urls_input: UrlsInput, request: Request): +async def crawl_urls(crawl_request: CrawlRequest, request: Request): global current_requests - # Raise error if api_token is not provided - if not urls_input.api_token: - raise HTTPException(status_code=401, detail="API token is required.") async with lock: if current_requests >= MAX_CONCURRENT_REQUESTS: raise HTTPException(status_code=429, detail="Too many requests - please try again later.") current_requests += 1 try: - # Prepare URL models for crawling - url_models = [UrlModel(url=url, forced=urls_input.forced) for url in urls_input.urls] + extraction_strategy = import_strategy("crawl4ai.extraction_strategy", crawl_request.extraction_strategy, 
**crawl_request.extraction_strategy_args) + chunking_strategy = import_strategy("crawl4ai.chunking_strategy", crawl_request.chunking_strategy, **crawl_request.chunking_strategy_args) # Use ThreadPoolExecutor to run the synchronous WebCrawler in async manner with ThreadPoolExecutor() as executor: loop = asyncio.get_event_loop() futures = [ - loop.run_in_executor(executor, get_crawler().fetch_page, url_model, urls_input.provider_model, urls_input.api_token, urls_input.extract_blocks, urls_input.word_count_threshold) - for url_model in url_models + loop.run_in_executor( + executor, + get_crawler().run, + str(url), + crawl_request.word_count_threshold, + extraction_strategy, + chunking_strategy, + crawl_request.bypass_cache, + crawl_request.css_selector, + crawl_request.verbose + ) + for url in crawl_request.urls ] results = await asyncio.gather(*futures) # if include_raw_html is False, remove the raw HTML content from the results - if not urls_input.include_raw_html: + if not crawl_request.include_raw_html: for result in results: result.html = None - + return {"results": [result.dict() for result in results]} finally: async with lock: current_requests -= 1 + +@app.get("/strategies/extraction", response_class=JSONResponse) +async def get_extraction_strategies(): + # Load docs/extraction_strategies.json" and return as JSON response + with open(f"{__location__}/docs/extraction_strategies.json", "r") as file: + return JSONResponse(content=file.read()) -@app.post("/crawl_async") -async def crawl_urls(urls_input: UrlsInput, request: Request): - global current_requests - if not urls_input.api_token: - raise HTTPException(status_code=401, detail="API token is required.") - - async with lock: - if current_requests >= MAX_CONCURRENT_REQUESTS: - raise HTTPException(status_code=429, detail="Too many requests - please try again later.") - current_requests += 1 - - task_id = str(uuid.uuid4()) - tasks[task_id] = {"status": "pending", "results": None} - - try: - url_models = [UrlModel(url=url, forced=urls_input.forced) for url in urls_input.urls] - - loop = asyncio.get_running_loop() - loop.create_task( - process_crawl_task(url_models, urls_input.provider_model, urls_input.api_token, task_id, urls_input.extract_blocks) - ) - return {"task_id": task_id} - finally: - async with lock: - current_requests -= 1 - -async def process_crawl_task(url_models, provider, api_token, task_id, extract_blocks_flag): - try: - with ThreadPoolExecutor() as executor: - loop = asyncio.get_running_loop() - futures = [ - loop.run_in_executor(executor, get_crawler().fetch_page, url_model, provider, api_token, extract_blocks_flag) - for url_model in url_models - ] - results = await asyncio.gather(*futures) - - tasks[task_id] = {"status": "done", "results": results} - except Exception as e: - tasks[task_id] = {"status": "failed", "error": str(e)} - -@app.get("/task/{task_id}") -async def get_task_status(task_id: str): - task = tasks.get(task_id) - if not task: - raise HTTPException(status_code=404, detail="Task not found") - - if task['status'] == 'done': - return { - "status": task['status'], - "results": [result.dict() for result in task['results']] - } - elif task['status'] == 'failed': - return { - "status": task['status'], - "error": task['error'] - } - else: - return {"status": task['status']} +@app.get("/strategies/chunking", response_class=JSONResponse) +async def get_chunking_strategies(): + with open(f"{__location__}/docs/chunking_strategies.json", "r") as file: + return JSONResponse(content=file.read()) + if __name__ == 
"__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/pages/app.css b/pages/app.css new file mode 100644 index 00000000..0e94a2e5 --- /dev/null +++ b/pages/app.css @@ -0,0 +1,131 @@ +:root { + --ifm-font-size-base: 100%; + --ifm-line-height-base: 1.65; + --ifm-font-family-base: system-ui, -apple-system, Segoe UI, Roboto, Ubuntu, Cantarell, Noto Sans, sans-serif, + BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", + "Segoe UI Symbol"; +} +html { + -webkit-font-smoothing: antialiased; + -webkit-text-size-adjust: 100%; + text-size-adjust: 100%; + font: var(--ifm-font-size-base) / var(--ifm-line-height-base) var(--ifm-font-family-base); +} +body { + background-color: #1a202c; + color: #fff; +} +.tab-content { + max-height: 400px; + overflow: auto; +} +pre { + white-space: pre-wrap; + font-size: 14px; +} +pre code { + width: 100%; +} + +/* Custom styling for docs-item class and Markdown generated elements */ +.docs-item { + background-color: #2d3748; /* bg-gray-800 */ + padding: 1rem; /* p-4 */ + border-radius: 0.375rem; /* rounded */ + box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); /* shadow-md */ + margin-bottom: 1rem; /* space between items */ + line-height: 1.5; /* leading-normal */ +} + +.docs-item h3, +.docs-item h4 { + color: #ffffff; /* text-white */ + font-size: 1.25rem; /* text-xl */ + font-weight: 700; /* font-bold */ + margin-bottom: 0.5rem; /* mb-2 */ +} +.docs-item h4 { + font-size: 1rem; /* text-xl */ +} + +.docs-item p { + color: #e2e8f0; /* text-gray-300 */ + margin-bottom: 0.5rem; /* mb-2 */ +} + +.docs-item code { + background-color: #1a202c; /* bg-gray-900 */ + color: #e2e8f0; /* text-gray-300 */ + padding: 0.25rem 0.5rem; /* px-2 py-1 */ + border-radius: 0.25rem; /* rounded */ + font-size: 0.875rem; /* text-sm */ +} + +.docs-item pre { + background-color: #1a202c; /* bg-gray-900 */ + color: #e2e8f0; /* text-gray-300 */ + padding: 0.5rem; /* p-2 */ + border-radius: 0.375rem; /* rounded */ + overflow: auto; /* overflow-auto */ + margin-bottom: 0.5rem; /* mb-2 */ +} + +.docs-item div { + color: #e2e8f0; /* text-gray-300 */ + font-size: 1rem; /* prose prose-sm */ + line-height: 1.25rem; /* line-height for readability */ +} + +/* Adjustments to make prose class more suitable for dark mode */ +.prose { + max-width: none; /* max-w-none */ +} + +.prose p, +.prose ul { + margin-bottom: 1rem; /* mb-4 */ +} + +.prose code { + /* background-color: #4a5568; */ /* bg-gray-700 */ + color: #65a30d; /* text-white */ + padding: 0.25rem 0.5rem; /* px-1 py-0.5 */ + border-radius: 0.25rem; /* rounded */ + display: inline-block; /* inline-block */ +} + +.prose pre { + background-color: #1a202c; /* bg-gray-900 */ + color: #ffffff; /* text-white */ + padding: 0.5rem; /* p-2 */ + border-radius: 0.375rem; /* rounded */ +} + +.prose h3 { + color: #65a30d; /* text-white */ + font-size: 1.25rem; /* text-xl */ + font-weight: 700; /* font-bold */ + margin-bottom: 0.5rem; /* mb-2 */ +} + +body { + background-color: #1a1a1a; + color: #b3ff00; +} +.sidebar { + color: #b3ff00; + border-right: 1px solid #333; +} +.sidebar a { + color: #b3ff00; + text-decoration: none; +} +.sidebar a:hover { + background-color: #555; +} +.content-section { + display: none; +} +.content-section.active { + display: block; +} diff --git a/pages/app.js b/pages/app.js new file mode 100644 index 00000000..200e29d3 --- /dev/null +++ b/pages/app.js @@ -0,0 +1,306 @@ +// JavaScript to manage dynamic form changes and logic 
+document.getElementById("extraction-strategy-select").addEventListener("change", function () { + const strategy = this.value; + const providerModelSelect = document.getElementById("provider-model-select"); + const tokenInput = document.getElementById("token-input"); + const instruction = document.getElementById("instruction"); + const semantic_filter = document.getElementById("semantic_filter"); + const instruction_div = document.getElementById("instruction_div"); + const semantic_filter_div = document.getElementById("semantic_filter_div"); + const llm_settings = document.getElementById("llm_settings"); + + if (strategy === "LLMExtractionStrategy") { + // providerModelSelect.disabled = false; + // tokenInput.disabled = false; + // semantic_filter.disabled = true; + // instruction.disabled = false; + llm_settings.classList.remove("hidden"); + instruction_div.classList.remove("hidden"); + semantic_filter_div.classList.add("hidden"); + } else if (strategy === "NoExtractionStrategy") { + semantic_filter_div.classList.add("hidden"); + instruction_div.classList.add("hidden"); + llm_settings.classList.add("hidden"); + } else { + // providerModelSelect.disabled = true; + // tokenInput.disabled = true; + // semantic_filter.disabled = false; + // instruction.disabled = true; + llm_settings.classList.add("hidden"); + instruction_div.classList.add("hidden"); + semantic_filter_div.classList.remove("hidden"); + } + + +}); + +// Get the selected provider model and token from local storage +const storedProviderModel = localStorage.getItem("provider_model"); +const storedToken = localStorage.getItem(storedProviderModel); + +if (storedProviderModel) { + document.getElementById("provider-model-select").value = storedProviderModel; +} + +if (storedToken) { + document.getElementById("token-input").value = storedToken; +} + +// Handle provider model dropdown change +document.getElementById("provider-model-select").addEventListener("change", () => { + const selectedProviderModel = document.getElementById("provider-model-select").value; + const storedToken = localStorage.getItem(selectedProviderModel); + + if (storedToken) { + document.getElementById("token-input").value = storedToken; + } else { + document.getElementById("token-input").value = ""; + } +}); + +// Fetch total count from the database +axios + .get("/total-count") + .then((response) => { + document.getElementById("total-count").textContent = response.data.count; + }) + .catch((error) => console.error(error)); + +// Handle crawl button click +document.getElementById("crawl-btn").addEventListener("click", () => { + // validate input to have both URL and API token + // if selected extraction strategy is LLMExtractionStrategy, then API token is required + if (document.getElementById("extraction-strategy-select").value === "LLMExtractionStrategy") { + if (!document.getElementById("url-input").value || !document.getElementById("token-input").value) { + alert("Please enter both URL(s) and API token."); + return; + } + } + + const selectedProviderModel = document.getElementById("provider-model-select").value; + const apiToken = document.getElementById("token-input").value; + const extractBlocks = document.getElementById("extract-blocks-checkbox").checked; + const bypassCache = document.getElementById("bypass-cache-checkbox").checked; + + // Save the selected provider model and token to local storage + localStorage.setItem("provider_model", selectedProviderModel); + localStorage.setItem(selectedProviderModel, apiToken); + + const urlsInput = 
document.getElementById("url-input").value; + const urls = urlsInput.split(",").map((url) => url.trim()); + const data = { + urls: urls, + include_raw_html: true, + bypass_cache: bypassCache, + extract_blocks: extractBlocks, + word_count_threshold: parseInt(document.getElementById("threshold").value), + extraction_strategy: document.getElementById("extraction-strategy-select").value, + extraction_strategy_args: { + provider: selectedProviderModel, + api_token: apiToken, + instruction: document.getElementById("instruction").value, + semantic_filter: document.getElementById("semantic_filter").value, + }, + chunking_strategy: document.getElementById("chunking-strategy-select").value, + chunking_strategy_args: {}, + css_selector: document.getElementById("css-selector").value, + // instruction: document.getElementById("instruction").value, + // semantic_filter: document.getElementById("semantic_filter").value, + verbose: true, + }; + + // save api token to local storage + localStorage.setItem("api_token", document.getElementById("token-input").value); + + document.getElementById("loading").classList.remove("hidden"); + document.getElementById("result").style.visibility = "hidden"; + document.getElementById("code_help").style.visibility = "hidden"; + + axios + .post("/crawl", data) + .then((response) => { + const result = response.data.results[0]; + const parsedJson = JSON.parse(result.extracted_content); + document.getElementById("json-result").textContent = JSON.stringify(parsedJson, null, 2); + document.getElementById("cleaned-html-result").textContent = result.cleaned_html; + document.getElementById("markdown-result").textContent = result.markdown; + + // Update code examples dynamically + const extractionStrategy = data.extraction_strategy; + const isLLMExtraction = extractionStrategy === "LLMExtractionStrategy"; + + // REMOVE API TOKEN FROM CODE EXAMPLES + data.extraction_strategy_args.api_token = "your_api_token"; + document.getElementById( + "curl-code" + ).textContent = `curl -X POST -H "Content-Type: application/json" -d '${JSON.stringify({ + ...data, + api_token: isLLMExtraction ? "your_api_token" : undefined, + }, null, 2)}' http://crawl4ai.com/crawl`; + + document.getElementById("python-code").textContent = `import requests\n\ndata = ${JSON.stringify( + { ...data, api_token: isLLMExtraction ? "your_api_token" : undefined }, + null, + 2 + )}\n\nresponse = requests.post("http://crawl4ai.com/crawl", json=data) # OR local host if your run locally \nprint(response.json())`; + + document.getElementById( + "nodejs-code" + ).textContent = `const axios = require('axios');\n\nconst data = ${JSON.stringify( + { ...data, api_token: isLLMExtraction ? "your_api_token" : undefined }, + null, + 2 + )};\n\naxios.post("http://crawl4ai.com/crawl", data) // OR local host if your run locally \n .then(response => console.log(response.data))\n .catch(error => console.error(error));`; + + document.getElementById( + "library-code" + ).textContent = `from crawl4ai.web_crawler import WebCrawler\nfrom crawl4ai.extraction_strategy import *\nfrom crawl4ai.chunking_strategy import *\n\ncrawler = WebCrawler()\ncrawler.warmup()\n\nresult = crawler.run(\n url='${ + urls[0] + }',\n word_count_threshold=${data.word_count_threshold},\n extraction_strategy=${ + isLLMExtraction + ? 
`${extractionStrategy}(provider="${data.provider_model}", api_token="${data.api_token}")` + : extractionStrategy + "()" + },\n chunking_strategy=${data.chunking_strategy}(),\n bypass_cache=${ + data.bypass_cache + },\n css_selector="${data.css_selector}"\n)\nprint(result)`; + + // Highlight code syntax + hljs.highlightAll(); + + // Select JSON tab by default + document.querySelector('.tab-btn[data-tab="json"]').click(); + + document.getElementById("loading").classList.add("hidden"); + + document.getElementById("result").style.visibility = "visible"; + document.getElementById("code_help").style.visibility = "visible"; + + // increment the total count + document.getElementById("total-count").textContent = + parseInt(document.getElementById("total-count").textContent) + 1; + }) + .catch((error) => { + console.error(error); + document.getElementById("loading").classList.add("hidden"); + }); +}); + +// Handle tab clicks +document.querySelectorAll(".tab-btn").forEach((btn) => { + btn.addEventListener("click", () => { + const tab = btn.dataset.tab; + document.querySelectorAll(".tab-btn").forEach((b) => b.classList.remove("bg-lime-700", "text-white")); + btn.classList.add("bg-lime-700", "text-white"); + document.querySelectorAll(".tab-content.code pre").forEach((el) => el.classList.add("hidden")); + document.getElementById(`${tab}-result`).parentElement.classList.remove("hidden"); + }); +}); + +// Handle code tab clicks +document.querySelectorAll(".code-tab-btn").forEach((btn) => { + btn.addEventListener("click", () => { + const tab = btn.dataset.tab; + document.querySelectorAll(".code-tab-btn").forEach((b) => b.classList.remove("bg-lime-700", "text-white")); + btn.classList.add("bg-lime-700", "text-white"); + document.querySelectorAll(".tab-content.result pre").forEach((el) => el.classList.add("hidden")); + document.getElementById(`${tab}-code`).parentElement.classList.remove("hidden"); + }); +}); + +// Handle copy to clipboard button clicks + +async function copyToClipboard(text) { + if (navigator.clipboard && navigator.clipboard.writeText) { + return navigator.clipboard.writeText(text); + } else { + return fallbackCopyTextToClipboard(text); + } +} + +function fallbackCopyTextToClipboard(text) { + return new Promise((resolve, reject) => { + const textArea = document.createElement("textarea"); + textArea.value = text; + + // Avoid scrolling to bottom + textArea.style.top = "0"; + textArea.style.left = "0"; + textArea.style.position = "fixed"; + + document.body.appendChild(textArea); + textArea.focus(); + textArea.select(); + + try { + const successful = document.execCommand("copy"); + if (successful) { + resolve(); + } else { + reject(); + } + } catch (err) { + reject(err); + } + + document.body.removeChild(textArea); + }); +} + +document.querySelectorAll(".copy-btn").forEach((btn) => { + btn.addEventListener("click", () => { + const target = btn.dataset.target; + const code = document.getElementById(target).textContent; + //navigator.clipboard.writeText(code).then(() => { + copyToClipboard(code).then(() => { + btn.textContent = "Copied!"; + setTimeout(() => { + btn.textContent = "Copy"; + }, 2000); + }); + }); +}); + +document.addEventListener("DOMContentLoaded", async () => { + try { + const extractionResponse = await fetch("/strategies/extraction"); + const extractionStrategies = await extractionResponse.json(); + + const chunkingResponse = await fetch("/strategies/chunking"); + const chunkingStrategies = await chunkingResponse.json(); + + renderStrategies("extraction-strategies", 
extractionStrategies); + renderStrategies("chunking-strategies", chunkingStrategies); + } catch (error) { + console.error("Error fetching strategies:", error); + } +}); + +function renderStrategies(containerId, strategies) { + const container = document.getElementById(containerId); + container.innerHTML = ""; // Clear any existing content + strategies = JSON.parse(strategies); + Object.entries(strategies).forEach(([strategy, description]) => { + const strategyElement = document.createElement("div"); + strategyElement.classList.add("bg-zinc-800", "p-4", "rounded", "shadow-md", "docs-item"); + + const strategyDescription = document.createElement("div"); + strategyDescription.classList.add("text-gray-300", "prose", "prose-sm"); + strategyDescription.innerHTML = marked.parse(description); + + strategyElement.appendChild(strategyDescription); + + container.appendChild(strategyElement); + }); +} +document.querySelectorAll(".sidebar a").forEach((link) => { + link.addEventListener("click", function (event) { + event.preventDefault(); + document.querySelectorAll(".content-section").forEach((section) => { + section.classList.remove("active"); + }); + const target = event.target.getAttribute("data-target"); + document.getElementById(target).classList.add("active"); + }); +}); +// Highlight code syntax +hljs.highlightAll(); diff --git a/pages/index copy.html b/pages/index copy.html new file mode 100644 index 00000000..b61b7298 --- /dev/null +++ b/pages/index copy.html @@ -0,0 +1,971 @@ + + + + + + Crawl4AI + + + + + + + + + + + + + + + + +

+
+

πŸ”₯πŸ•·οΈ Crawl4AI: Web Data for your Thoughts

+
+
+ πŸ“Š Total Website Processed + 2 +
+
+ +
+
+

Try It Now

+
+
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+
+ + +
+
+ + +
+ +
+
+ +
+ +
+ + + +
+
+
+ + +
+
+ +
+
+ + + + +
+
+
+                                
+                                
+                            
+ + + +
+
+
+
+
+
+
+ +
+ 🌟 Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! +
+
+ First Step: Create an instance of WebCrawler and call the warmup() function. +
+
+
crawler = WebCrawler()
+            crawler.warmup()
+
+ + +
+ 🧠 Understanding 'bypass_cache' and 'include_raw_html' parameters: +
+
First crawl (caches the result):
+
+
result = crawler.run(url="https://www.nbcnews.com/business")
+
+
Second crawl (Force to crawl again):
+
+
result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True)
+
+
Crawl result without raw HTML content:
+
+
result = crawler.run(url="https://www.nbcnews.com/business", include_raw_html=False)
+
+ + +
+ πŸ“„ + The 'include_raw_html' parameter, when set to True, includes the raw HTML content in the + response. By default, it is set to True. +
+
Set always_by_pass_cache to True:
+
+
crawler.always_by_pass_cache = True
+
+ + +
+ 🧩 Let's add a chunking strategy: RegexChunking! +
+
Using RegexChunking:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                chunking_strategy=RegexChunking(patterns=["\n\n"])
+            )
+
+
Using NlpSentenceChunking:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                chunking_strategy=NlpSentenceChunking()
+            )
+
+ + +
+ 🧠 Let's get smarter with an extraction strategy: CosineStrategy! +
+
Using CosineStrategy:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                extraction_strategy=CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3)
+            )
+
+ + +
+ πŸ€– Time to bring in the big guns: LLMExtractionStrategy without instructions! +
+
Using LLMExtractionStrategy without instructions:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'))
+            )
+
+ + +
+ πŸ“œ Let's make it even more interesting: LLMExtractionStrategy with instructions! +
+
Using LLMExtractionStrategy with instructions:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                extraction_strategy=LLMExtractionStrategy(
+                    provider="openai/gpt-4o",
+                    api_token=os.getenv('OPENAI_API_KEY'),
+                    instruction="I am interested in only financial news"
+                )
+            )
+
+ + +
+ 🎯 Targeted extraction: Let's use a CSS selector to extract only H2 tags! +
+
Using CSS selector to extract H2 tags:
+
+
result = crawler.run(
+                url="https://www.nbcnews.com/business",
+                css_selector="h2"
+            )
+
+ + +
+ πŸ–±οΈ Let's get interactive: Passing JavaScript code to click 'Load More' button! +
+
Using JavaScript to click 'Load More' button:
+
+
js_code = """
+            const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
+            loadMoreButton && loadMoreButton.click();
+            """
+            crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
+            crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
+            result = crawler.run(url="https://www.nbcnews.com/business")
+
+ + +
+ πŸŽ‰ + Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl + the web like a pro! πŸ•ΈοΈ +
+
+
+
+

Installation πŸ’»

+

+ There are two ways to use Crawl4AI: as a library in your Python projects or as a standalone local + server. +

+ +

+ You can also try Crawl4AI in a Google Colab + Open In Colab +

+ +

Using Crawl4AI as a Library πŸ“š

+

To install Crawl4AI as a library, follow these steps:

+ +
    +
  1. + Install the package from GitHub: +
    pip install git+https://github.com/unclecode/crawl4ai.git
    +
  2. +
  3. + Alternatively, you can clone the repository and install the package locally: +
    virtualenv venv
    +source venv/bin/activate
    +git clone https://github.com/unclecode/crawl4ai.git
    +cd crawl4ai
    +pip install -e .
    +        
    +
  4. +
  5. + Import the necessary modules in your Python script: +
    from crawl4ai.web_crawler import WebCrawler
    +from crawl4ai.chunking_strategy import *
    +from crawl4ai.extraction_strategy import *
    +import os
    +
    +crawler = WebCrawler()
    +
    +# Single page crawl
    +result = crawler.run(
    +    url='https://www.nbcnews.com/business',
    +    word_count_threshold=5, # Minimum word count for an HTML tag to be considered a meaningful block
    +    chunking_strategy=RegexChunking(patterns=["\\n\\n"]), # Default is RegexChunking
    +    extraction_strategy=CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method='ward', top_k=3), # Default is CosineStrategy
    +    # extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
    +    bypass_cache=False,
    +    extract_blocks=True, # Whether to extract semantic blocks of text from the HTML
    +    css_selector="", # E.g. "div.article-body"
    +    verbose=True,
    +    include_raw_html=True, # Whether to include the raw HTML content in the response
    +)
    +print(result.model_dump())
    +        
    +
  6. +
+

+ For more information about how to run Crawl4AI as a local server, please refer to the + GitHub repository. +

+ +
+ +
+

πŸ“– Parameters

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Parameter | Description | Required | Default Value |
|---|---|---|---|
| `urls` | A list of URLs to crawl and extract data from. | Yes | - |
| `include_raw_html` | Whether to include the raw HTML content in the response. | No | `false` |
| `bypass_cache` | Whether to force a fresh crawl even if the URL has been previously crawled. | No | `false` |
| `extract_blocks` | Whether to extract semantic blocks of text from the HTML. | No | `true` |
| `word_count_threshold` | The minimum number of words a block must contain to be considered meaningful (minimum value is 5). | No | `5` |
| `extraction_strategy` | The strategy to use for extracting content from the HTML (e.g., "CosineStrategy"). | No | `CosineStrategy` |
| `chunking_strategy` | The strategy to use for chunking the text before processing (e.g., "RegexChunking"). | No | `RegexChunking` |
| `css_selector` | The CSS selector to target specific parts of the HTML for extraction. | No | `None` |
| `verbose` | Whether to enable verbose logging. | No | `true` |
+
+
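For orientation, here is a rough sketch of a request to the local REST server using these parameters. It assumes the server from this PR is running on port 8000 (as in the uvicorn/Docker setup shown elsewhere in the diff) and that the field names match the `/crawl` payload built by the demo page; the URL and values are illustrative only.

```python
# Sketch of a /crawl request against a locally running server (port 8000 assumed).
# Field names mirror the parameters table above; values are illustrative.
import requests

payload = {
    "urls": ["https://www.nbcnews.com/business"],
    "include_raw_html": False,
    "bypass_cache": False,
    "extract_blocks": True,
    "word_count_threshold": 5,
    "extraction_strategy": "CosineStrategy",
    "extraction_strategy_args": {"semantic_filter": "technology"},
    "chunking_strategy": "RegexChunking",
    "chunking_strategy_args": {},
    "css_selector": "",
    "verbose": True,
}

response = requests.post("http://localhost:8000/crawl", json=payload)
# The server returns {"results": [...]}; each result carries markdown, cleaned HTML, etc.
print(response.json()["results"][0]["markdown"])
```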
+ +
+
+

Extraction Strategies

+
+
+
+ +
+
+

Chunking Strategies

+
+
+
+ +
+
+

πŸ€” Why build this?

+

+ In recent times, we've witnessed a surge of startups emerging, riding the AI hype wave and charging + for services that should rightfully be accessible to everyone. πŸŒπŸ’Έ One such example is scraping and + crawling web pages and transforming them into a format suitable for Large Language Models (LLMs). + πŸ•ΈοΈπŸ€– We believe that building a business around this is not the right approach; instead, it should + definitely be open-source. πŸ†“πŸŒŸ So, if you possess the skills to build such tools and share our + philosophy, we invite you to join our "Robinhood" band and help set these products free for the + benefit of all. 🀝πŸ’ͺ +

+
+
+ +
+
+

βš™οΈ Installation

+

+ To install and run Crawl4AI as a library or a local server, please refer to the πŸ“š + GitHub repository. +

+
+
+ + + + + + diff --git a/pages/index.html b/pages/index.html index 63d32dd9..2947c34a 100644 --- a/pages/index.html +++ b/pages/index.html @@ -9,387 +9,65 @@ - + + + + + - - -
-
-

πŸ”₯πŸ•·οΈ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper

+ +
+ +
+

πŸ”₯πŸ•·οΈ Crawl4AI: Web Data for your Thoughts

+
+
+ πŸ“Š Total Website Processed + 2
+ + {{ try_it | safe }} - -
-
- πŸ“Š Total Website Procceced - 0 -
-
- -
-
-

Try It Now

-
-
- - -
- -
- - +
+
+
+ -
- + +
+ {{installation | safe}} {{how_to_guide | safe}} - -
-
- - - -
-
- - -
- -
-
- -
-
- - - -
-
-
- - -
-
-
-
- - - -
-
-
-                                    
-                                    
-                                
- - -
+
+

Chunking Strategies

+

Content for chunking strategies...

+
+
+

Extraction Strategies

+

Content for extraction strategies...

+
-
- -
-
-

πŸ€” Why build this?

-

- In recent times, we've witnessed a surge of startups emerging, riding the AI hype wave and charging - for services that should rightfully be accessible to everyone. πŸŒπŸ’Έ One such example is scraping and - crawling web pages and transforming them into a format suitable for Large Language Models (LLMs). - πŸ•ΈοΈπŸ€– We believe that building a business around this is not the right approach; instead, it should - definitely be open-source. πŸ†“πŸŒŸ So, if you possess the skills to build such tools and share our - philosophy, we invite you to join our "Robinhood" band and help set these products free for the - benefit of all. 🀝πŸ’ͺ -

-
- -
-
-

βš™οΈ Installation

-

- To install and run Crawl4AI as a library or a local server, please refer to the πŸ“š - GitHub repository. -

-
-
- - - - + + {{ footer | safe }} + diff --git a/pages/index_pooling.html b/pages/index_pooling.html index 50e57f01..920801d1 100644 --- a/pages/index_pooling.html +++ b/pages/index_pooling.html @@ -283,7 +283,7 @@ .post("/crawl", data) .then((response) => { const result = response.data.results[0]; - const parsedJson = JSON.parse(result.parsed_json); + const parsedJson = JSON.parse(result.extracted_content); document.getElementById("json-result").textContent = JSON.stringify(parsedJson, null, 2); document.getElementById("cleaned-html-result").textContent = result.cleaned_html; document.getElementById("markdown-result").textContent = result.markdown; diff --git a/pages/partial/footer.html b/pages/partial/footer.html new file mode 100644 index 00000000..3ab189e1 --- /dev/null +++ b/pages/partial/footer.html @@ -0,0 +1,36 @@ +
+
+

πŸ€” Why build this?

+

+ In recent times, we've witnessed a surge of startups emerging, riding the AI hype wave and charging + for services that should rightfully be accessible to everyone. πŸŒπŸ’Έ One such example is scraping and + crawling web pages and transforming them into a format suitable for Large Language Models (LLMs). + πŸ•ΈοΈπŸ€– We believe that building a business around this is not the right approach; instead, it should + definitely be open-source. πŸ†“πŸŒŸ So, if you possess the skills to build such tools and share our + philosophy, we invite you to join our "Robinhood" band and help set these products free for the + benefit of all. 🀝πŸ’ͺ +

+
+
+ + \ No newline at end of file diff --git a/pages/partial/how_to_guide.html b/pages/partial/how_to_guide.html new file mode 100644 index 00000000..b8f85ed6 --- /dev/null +++ b/pages/partial/how_to_guide.html @@ -0,0 +1,160 @@ +
+

How to Guide

+
+ +
+ 🌟 + Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling + fun! +
+
+ First Step: Create an instance of WebCrawler and call the + warmup() function. +
+
+
crawler = WebCrawler()
+crawler.warmup()
+
+ + +
+ 🧠 Understanding 'bypass_cache' and 'include_raw_html' parameters: +
+
First crawl (caches the result):
+
+
result = crawler.run(url="https://www.nbcnews.com/business")
+
+
Second crawl (Force to crawl again):
+
+
result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True)
+
+ ⚠️ Don't forget to set `bypass_cache` to True if you want to try different strategies for the same URL; otherwise, the cached result will be returned. You can also set `always_by_pass_cache` to True in the constructor to always bypass the cache.
+
+
Crawl result without raw HTML content:
+
+
result = crawler.run(url="https://www.nbcnews.com/business", include_raw_html=False)
+
+ + +
+ πŸ“„ + The 'include_raw_html' parameter, when set to True, includes the raw HTML content + in the response. By default, it is set to True. +
+
Set always_by_pass_cache to True:
+
+
crawler.always_by_pass_cache = True
+
+ + +
+ 🧩 Let's add a chunking strategy: RegexChunking! +
+
Using RegexChunking:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    chunking_strategy=RegexChunking(patterns=["\n\n"])
+)
+
+
Using NlpSentenceChunking:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    chunking_strategy=NlpSentenceChunking()
+)
+
+ + +
+ 🧠 Let's get smarter with an extraction strategy: CosineStrategy! +
+
Using CosineStrategy:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    extraction_strategy=CosineStrategy(word_count_threshold=10, max_dist=0.2, linkage_method="ward", top_k=3)
+)
+
+ + +
+ πŸ€– + Time to bring in the big guns: LLMExtractionStrategy without instructions! +
+
Using LLMExtractionStrategy without instructions:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'))
+)
+
+ + +
+ πŸ“œ + Let's make it even more interesting: LLMExtractionStrategy with + instructions! +
+
Using LLMExtractionStrategy with instructions:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    extraction_strategy=LLMExtractionStrategy(
+    provider="openai/gpt-4o",
+    api_token=os.getenv('OPENAI_API_KEY'),
+    instruction="I am interested in only financial news"
+)
+)
+
+ + +
+ 🎯 + Targeted extraction: Let's use a CSS selector to extract only H2 tags! +
+
Using CSS selector to extract H2 tags:
+
+
result = crawler.run(
+    url="https://www.nbcnews.com/business",
+    css_selector="h2"
+)
+
+ + +
+ πŸ–±οΈ + Let's get interactive: Passing JavaScript code to click 'Load More' button! +
+
Using JavaScript to click 'Load More' button:
+
+
js_code = """
+const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
+loadMoreButton && loadMoreButton.click();
+"""
+crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
+crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
+result = crawler.run(url="https://www.nbcnews.com/business")
+
+ + +
+ πŸŽ‰ + Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth + and crawl the web like a pro! πŸ•ΈοΈ +
+
+
\ No newline at end of file diff --git a/pages/partial/installation.html b/pages/partial/installation.html new file mode 100644 index 00000000..6a6561cd --- /dev/null +++ b/pages/partial/installation.html @@ -0,0 +1,65 @@ +
+

Installation πŸ’»

+

+ There are three ways to use Crawl4AI: +

    +
  1. + As a library +
  2. +
  3. + As a local server (Docker) +
  4. +
  5. + As a Google Colab notebook. Open In Colab +
  6. +

    + + +

    To install Crawl4AI as a library, follow these steps:

    + +
      +
    1. + Install the package from GitHub: +
      virtualenv venv
      +source venv/bin/activate
      +pip install "crawl4ai[all] @ git+https://github.com/unclecode/crawl4ai.git"
      +            
      +
    2. +
    3. + Run the following command to load the required models. This is optional, but it will boost the performance and speed of the crawler. You need to do this only once. +
      crawl4ai-download-models
      +
    4. +
    5. + Alternatively, you can clone the repository and install the package locally: +
      virtualenv venv
      +source venv/bin/activate
      +git clone https://github.com/unclecode/crawl4ai.git
      +cd crawl4ai
      +pip install -e .[all]
      +
      +
    6. +
    7. + Use docker to run the local server: +
      docker build -t crawl4ai . 
      +# docker build --platform linux/amd64 -t crawl4ai . For Mac users
      +docker run -d -p 8000:80 crawl4ai
      +
    8. +
    +

    + For more information about how to run Crawl4AI as a local server, please refer to the + GitHub repository. +

    +
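As a quick sanity check after installing the library, something like the following sketch should work; the class and method names follow the quickstart examples in this PR, the URL is just an example, and the `markdown` field name is taken from how the demo page reads results.

```python
# Quick post-install sanity check (sketch; mirrors the quickstart examples in this PR).
from crawl4ai.web_crawler import WebCrawler

crawler = WebCrawler()
crawler.warmup()  # optional warm-up step, as shown in the quickstart guide

result = crawler.run(url="https://www.nbcnews.com/business")
print(result.markdown[:300])  # first 300 characters of the extracted markdown
```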
\ No newline at end of file diff --git a/pages/partial/try_it.html b/pages/partial/try_it.html new file mode 100644 index 00000000..b7fa2a13 --- /dev/null +++ b/pages/partial/try_it.html @@ -0,0 +1,204 @@ +
+
+

Try It Now

+
+
+
+ + +
+
+
+ + +
+
+ + +
+
+
+
+ + +
+
+ + +
+
+ +
+ + + +
+
+
+ + +
+
+ + +
+ +
+
+ + +
+
+ + + +
+
+
+ + +
+
+ +
+
+ + + + +
+
+
+                        
+                        
+                    
+ + + +
+
+
+
+
diff --git a/pages/tmp.html b/pages/tmp.html new file mode 100644 index 00000000..7c924676 --- /dev/null +++ b/pages/tmp.html @@ -0,0 +1,434 @@ +
+
+

Installation πŸ’»

+

There are three ways to use Crawl4AI:

+
    +
  1. As a library
  2. +
  3. As a local server (Docker)
  4. +
  5. + As a Google Colab notebook. + Open In Colab +
  6. +

    + +

    To install Crawl4AI as a library, follow these steps:

    + +
      +
    1. + Install the package from GitHub: +
      pip install git+https://github.com/unclecode/crawl4ai.git
      +
    2. +
    3. + Alternatively, you can clone the repository and install the package locally: +
      virtualenv venv
      +source venv/bin/activate
      +git clone https://github.com/unclecode/crawl4ai.git
      +cd crawl4ai
      +pip install -e .
      +
      +
    4. +
    5. + Use docker to run the local server: +
      docker build -t crawl4ai . 
      +# docker build --platform linux/amd64 -t crawl4ai . For Mac users
      +docker run -d -p 8000:80 crawl4ai
      +
    6. +
    +

    + For more information about how to run Crawl4AI as a local server, please refer to the + GitHub repository. +

    +
+
+
+

How to Guide

+
+ +
+ 🌟 + Welcome to the Crawl4ai Quickstart Guide! Let's dive into some web crawling fun! +
+
+ First Step: Create an instance of WebCrawler and call the + warmup() function. +
+
+
crawler = WebCrawler()
+crawler.warmup()
+
+ + +
+ 🧠 Understanding 'bypass_cache' and 'include_raw_html' parameters: +
+
First crawl (caches the result):
+
+
result = crawler.run(url="https://www.nbcnews.com/business")
+
+
Second crawl (Force to crawl again):
+
+
result = crawler.run(url="https://www.nbcnews.com/business", bypass_cache=True)
+
+ ⚠️ Don't forget to set `bypass_cache` to True if you want to try different strategies for the same URL; otherwise, the cached result will be returned. You can also set `always_by_pass_cache` to True in the constructor to always bypass the cache.
+
+
Crawl result without raw HTML content:
+
+
result = crawler.run(url="https://www.nbcnews.com/business", include_raw_html=False)
+
+ + +
+ πŸ“„ + The 'include_raw_html' parameter, when set to True, includes the raw HTML content in the response. + By default, it is set to True. +
+
Set always_by_pass_cache to True:
+
+
crawler.always_by_pass_cache = True
+
+ + +
+ 🧩 Let's add a chunking strategy: RegexChunking! +
+
Using RegexChunking:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+chunking_strategy=RegexChunking(patterns=["\n\n"])
+)
+
+
Using NlpSentenceChunking:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+chunking_strategy=NlpSentenceChunking()
+)
+
+ + +
+ 🧠 Let's get smarter with an extraction strategy: CosineStrategy! +
+
Using CosineStrategy:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+extraction_strategy=CosineStrategy(word_count_threshold=20, max_dist=0.2, linkage_method="ward", top_k=3)
+)
+
+ + +
+ πŸ€– + Time to bring in the big guns: LLMExtractionStrategy without instructions! +
+
Using LLMExtractionStrategy without instructions:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'))
+)
+
+ + +
+ πŸ“œ + Let's make it even more interesting: LLMExtractionStrategy with instructions! +
+
Using LLMExtractionStrategy with instructions:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+extraction_strategy=LLMExtractionStrategy(
+provider="openai/gpt-4o",
+api_token=os.getenv('OPENAI_API_KEY'),
+instruction="I am interested in only financial news"
+)
+)
+
+ + +
+ 🎯 + Targeted extraction: Let's use a CSS selector to extract only H2 tags! +
+
Using CSS selector to extract H2 tags:
+
+
result = crawler.run(
+url="https://www.nbcnews.com/business",
+css_selector="h2"
+)
+
+ + +
+ πŸ–±οΈ + Let's get interactive: Passing JavaScript code to click 'Load More' button! +
+
Using JavaScript to click 'Load More' button:
+
+
js_code = """
+const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More'));
+loadMoreButton && loadMoreButton.click();
+"""
+crawler_strategy = LocalSeleniumCrawlerStrategy(js_code=js_code)
+crawler = WebCrawler(crawler_strategy=crawler_strategy, always_by_pass_cache=True)
+result = crawler.run(url="https://www.nbcnews.com/business")
+
+ + +
+ πŸŽ‰ + Congratulations! You've made it through the Crawl4ai Quickstart Guide! Now go forth and crawl the + web like a pro! πŸ•ΈοΈ +
+
+
+ +
+
+
+

RegexChunking

+

+ RegexChunking is a text chunking strategy that splits a given text into smaller parts + using regular expressions. This is useful for preparing large texts for processing by language + models, ensuring they are divided into manageable segments. +

+

Constructor Parameters:

+
    +
  • + patterns (list, optional): A list of regular expression patterns used to split the + text. Default is to split by double newlines (['\n\n']). +
  • +
+

Example usage:

+
chunker = RegexChunking(patterns=[r'\n\n', r'\. '])
+chunks = chunker.chunk("This is a sample text. It will be split into chunks.")
+
+
+
+
+
+

NlpSentenceChunking

+

+ NlpSentenceChunking uses a natural language processing model to chunk a given text into + sentences. This approach leverages SpaCy to accurately split text based on sentence boundaries. +

+

Constructor Parameters:

+
    +
  • + None. +
  • +
+

Example usage:

+
chunker = NlpSentenceChunking()
+chunks = chunker.chunk("This is a sample text. It will be split into sentences.")
+
+
+
+
+
+

TopicSegmentationChunking

+

+ TopicSegmentationChunking uses the TextTiling algorithm to segment a given text into + topic-based chunks. This method identifies thematic boundaries in the text. +

+

Constructor Parameters:

+
    +
  • + num_keywords (int, optional): The number of keywords to extract for each topic + segment. Default is 3. +
  • +
+

Example usage:

+
chunker = TopicSegmentationChunking(num_keywords=3)
+chunks = chunker.chunk("This is a sample text. It will be split into topic-based segments.")
+
+
+
+
+
+

FixedLengthWordChunking

+

+ FixedLengthWordChunking splits a given text into chunks of fixed length, based on the + number of words. +

+

Constructor Parameters:

+
    +
  • + chunk_size (int, optional): The number of words in each chunk. Default is + 100. +
  • +
+

Example usage:

+
chunker = FixedLengthWordChunking(chunk_size=100)
+chunks = chunker.chunk("This is a sample text. It will be split into fixed-length word chunks.")
+
+
+
+
+
+

SlidingWindowChunking

+

+ SlidingWindowChunking uses a sliding window approach to chunk a given text. Each chunk + has a fixed length, and the window slides by a specified step size. +

+

Constructor Parameters:

+
    +
  • + window_size (int, optional): The number of words in each chunk. Default is + 100. +
  • +
  • + step (int, optional): The number of words to slide the window. Default is + 50. +
  • +
+

Example usage:

+
chunker = SlidingWindowChunking(window_size=100, step=50)
+chunks = chunker.chunk("This is a sample text. It will be split using a sliding window approach.")
+
+
+
+
+
+
+
+

NoExtractionStrategy

+

+ NoExtractionStrategy is a basic extraction strategy that returns the entire HTML content without any modification. It is useful for cases where no specific extraction is required; only the cleaned HTML and markdown are returned.

+

Constructor Parameters:

+

None.

+

Example usage:

+
extractor = NoExtractionStrategy()
+extracted_content = extractor.extract(url, html)
+
+
+
+
+
+

LLMExtractionStrategy

+

+ LLMExtractionStrategy uses a Language Model (LLM) to extract meaningful blocks or + chunks from the given HTML content. This strategy leverages an external provider for language model + completions. +

+

Constructor Parameters:

+
    +
  • + provider (str, optional): The provider to use for the language model completions. + Default is DEFAULT_PROVIDER (e.g., openai/gpt-4). +
  • +
  • + api_token (str, optional): The API token for the provider. If not provided, it will + try to load from the environment variable OPENAI_API_KEY. +
  • +
  • + instruction (str, optional): An instruction to guide the LLM on how to perform the + extraction. This allows users to specify the type of data they are interested in or set the tone + of the response. Default is None. +
  • +
+

Example usage:

+
extractor = LLMExtractionStrategy(provider='openai', api_token='your_api_token', instruction='Extract only news about AI.')
+extracted_content = extractor.extract(url, html)
+
+

+ By providing clear instructions, users can tailor the extraction process to their specific needs, + enhancing the relevance and utility of the extracted content. +

+
+
+
+
+

CosineStrategy

+

+ CosineStrategy uses hierarchical clustering based on cosine similarity to extract + clusters of text from the given HTML content. This strategy is suitable for identifying related + content sections. +

+

Constructor Parameters:

+
    +
  • + semantic_filter (str, optional): A string containing keywords for filtering relevant + documents before clustering. If provided, documents are filtered based on their cosine + similarity to the keyword filter embedding. Default is None. +
  • +
  • + word_count_threshold (int, optional): Minimum number of words per cluster. Default + is 20. +
  • +
  • + max_dist (float, optional): The maximum cophenetic distance on the dendrogram to + form clusters. Default is 0.2. +
  • +
  • + linkage_method (str, optional): The linkage method for hierarchical clustering. + Default is 'ward'. +
  • +
  • + top_k (int, optional): Number of top categories to extract. Default is + 3. +
  • +
  • + model_name (str, optional): The model name for embedding generation. Default is + 'BAAI/bge-small-en-v1.5'. +
  • +
+

Example usage:

+
extractor = CosineStrategy(semantic_filter='artificial intelligence', word_count_threshold=10, max_dist=0.2, linkage_method='ward', top_k=3, model_name='BAAI/bge-small-en-v1.5')
+extracted_content = extractor.extract(url, html)
+
+

Cosine Similarity Filtering

+

+ When a semantic_filter is provided, the CosineStrategy applies an + embedding-based filtering process to select relevant documents before performing hierarchical + clustering. +

+
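To make the filtering step concrete, here is a rough, self-contained sketch of embedding-based filtering with cosine similarity. This is an illustration of the idea, not the library's exact implementation: it assumes the `sentence-transformers` package for embeddings (not pinned in this PR's requirements), reuses the `model_name` default listed above, and the `threshold` value is hypothetical.

```python
# Illustrative sketch of semantic filtering via cosine similarity (not the library's exact code).
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

def filter_documents(documents, semantic_filter, threshold=0.3,
                     model_name="BAAI/bge-small-en-v1.5"):
    """Keep only documents whose embedding is close enough to the filter embedding."""
    model = SentenceTransformer(model_name)
    filter_embedding = model.encode([semantic_filter])   # shape: (1, dim)
    doc_embeddings = model.encode(documents)              # shape: (n_docs, dim)
    similarities = cosine_similarity(doc_embeddings, filter_embedding).ravel()
    # threshold=0.3 is an arbitrary illustrative cutoff, not a library default
    return [doc for doc, sim in zip(documents, similarities) if sim >= threshold]

# Example: keep only text related to "artificial intelligence"
docs = ["AI models keep improving.", "The weather was sunny today."]
print(filter_documents(docs, "artificial intelligence"))
```

The surviving documents are then passed on to the hierarchical clustering step described above.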
+
+
+
+

TopicExtractionStrategy

+

+ TopicExtractionStrategy uses the TextTiling algorithm to segment the HTML content into + topics and extracts keywords for each segment. This strategy is useful for identifying and + summarizing thematic content. +

+

Constructor Parameters:

+
    +
  • + num_keywords (int, optional): Number of keywords to represent each topic segment. + Default is 3. +
  • +
+

Example usage:

+
extractor = TopicExtractionStrategy(num_keywords=3)
+extracted_content = extractor.extract(url, html)
+
+
+
+
+
diff --git a/requirements.txt b/requirements.txt index cb95dbd0..ccd9a41a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,19 @@ -fastapi -uvicorn -selenium -pydantic -aiohttp -aiosqlite -chromedriver_autoinstaller -httpx -requests -bs4 -html2text -litellm -python-dotenv \ No newline at end of file +aiohttp==3.9.5 +aiosqlite==0.20.0 +bs4==0.0.2 +fastapi==0.111.0 +html2text==2024.2.26 +httpx==0.27.0 +lazy_import==0.2.2 +litellm==1.37.11 +nltk==3.8.1 +pydantic==2.7.1 +python-dotenv==1.0.1 +requests==2.31.0 +rich==13.7.1 +scikit-learn==1.4.2 +selenium==4.20.0 +uvicorn==0.29.0 +transformers==4.40.2 +chromedriver-autoinstaller==0.6.4 +torch==2.3.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 0bcb419a..bfacedf6 100644 --- a/setup.py +++ b/setup.py @@ -3,10 +3,16 @@ from setuptools import setup, find_packages # Read the requirements from requirements.txt with open("requirements.txt") as f: requirements = f.read().splitlines() - + +# Define the requirements for different environments +requirements_without_torch = [req for req in requirements if not req.startswith("torch")] +requirements_without_transformers = [req for req in requirements if not req.startswith("transformers")] +requirements_without_nltk = [req for req in requirements if not req.startswith("nltk")] +requirements_without_torch_transformers_nlkt = [req for req in requirements if not req.startswith("torch") and not req.startswith("transformers") and not req.startswith("nltk")] + setup( name="Crawl4AI", - version="0.1.0", + version="0.2.0", description="πŸ”₯πŸ•·οΈ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper", long_description=open("README.md").read(), long_description_content_type="text/markdown", @@ -15,7 +21,17 @@ setup( author_email="unclecode@kidocode.com", license="MIT", packages=find_packages(), - install_requires=requirements, + install_requires=requirements_without_torch_transformers_nlkt, + extras_require={ + "all": requirements, # Include all requirements + "colab": requirements_without_torch, # Exclude torch for Colab + "crawl": requirements_without_torch_transformers_nlkt + }, + entry_points={ + 'console_scripts': [ + 'crawl4ai-download-models=crawl4ai.model_loader:main', + ], + }, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", @@ -27,4 +43,4 @@ setup( "Programming Language :: Python :: 3.10", ], python_requires=">=3.7", -) \ No newline at end of file +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_web_crawler.py b/tests/test_web_crawler.py new file mode 100644 index 00000000..99360f42 --- /dev/null +++ b/tests/test_web_crawler.py @@ -0,0 +1,111 @@ +import unittest, os +from crawl4ai.web_crawler import WebCrawler +from crawl4ai.chunking_strategy import RegexChunking, FixedLengthWordChunking, SlidingWindowChunking +from crawl4ai.extraction_strategy import CosineStrategy, LLMExtractionStrategy, TopicExtractionStrategy, NoExtractionStrategy + +class TestWebCrawler(unittest.TestCase): + + def setUp(self): + self.crawler = WebCrawler() + + def test_warmup(self): + self.crawler.warmup() + self.assertTrue(self.crawler.ready, "WebCrawler failed to warm up") + + def test_run_default_strategies(self): + result = self.crawler.run( + url='https://www.nbcnews.com/business', + word_count_threshold=5, + chunking_strategy=RegexChunking(), + extraction_strategy=CosineStrategy(), bypass_cache=True + ) + self.assertTrue(result.success, "Failed to crawl and extract using default 
strategies") + + def test_run_different_strategies(self): + url = 'https://www.nbcnews.com/business' + + # Test with FixedLengthWordChunking and LLMExtractionStrategy + result = self.crawler.run( + url=url, + word_count_threshold=5, + chunking_strategy=FixedLengthWordChunking(chunk_size=100), + extraction_strategy=LLMExtractionStrategy(provider="openai/gpt-3.5-turbo", api_token=os.getenv('OPENAI_API_KEY')), bypass_cache=True + ) + self.assertTrue(result.success, "Failed to crawl and extract with FixedLengthWordChunking and LLMExtractionStrategy") + + # Test with SlidingWindowChunking and TopicExtractionStrategy + result = self.crawler.run( + url=url, + word_count_threshold=5, + chunking_strategy=SlidingWindowChunking(window_size=100, step=50), + extraction_strategy=TopicExtractionStrategy(num_keywords=5), bypass_cache=True + ) + self.assertTrue(result.success, "Failed to crawl and extract with SlidingWindowChunking and TopicExtractionStrategy") + + def test_invalid_url(self): + with self.assertRaises(Exception) as context: + self.crawler.run(url='invalid_url', bypass_cache=True) + self.assertIn("Invalid URL", str(context.exception)) + + def test_unsupported_extraction_strategy(self): + with self.assertRaises(Exception) as context: + self.crawler.run(url='https://www.nbcnews.com/business', extraction_strategy="UnsupportedStrategy", bypass_cache=True) + self.assertIn("Unsupported extraction strategy", str(context.exception)) + + def test_invalid_css_selector(self): + with self.assertRaises(ValueError) as context: + self.crawler.run(url='https://www.nbcnews.com/business', css_selector="invalid_selector", bypass_cache=True) + self.assertIn("Invalid CSS selector", str(context.exception)) + + + def test_crawl_with_cache_and_bypass_cache(self): + url = 'https://www.nbcnews.com/business' + + # First crawl with cache enabled + result = self.crawler.run(url=url, bypass_cache=False) + self.assertTrue(result.success, "Failed to crawl and cache the result") + + # Second crawl with bypass_cache=True + result = self.crawler.run(url=url, bypass_cache=True) + self.assertTrue(result.success, "Failed to bypass cache and fetch fresh data") + + def test_fetch_multiple_pages(self): + urls = [ + 'https://www.nbcnews.com/business', + 'https://www.bbc.com/news' + ] + results = [] + for url in urls: + result = self.crawler.run( + url=url, + word_count_threshold=5, + chunking_strategy=RegexChunking(), + extraction_strategy=CosineStrategy(), + bypass_cache=True + ) + results.append(result) + + self.assertEqual(len(results), 2, "Failed to crawl and extract multiple pages") + for result in results: + self.assertTrue(result.success, "Failed to crawl and extract a page in the list") + + def test_run_fixed_length_word_chunking_and_no_extraction(self): + result = self.crawler.run( + url='https://www.nbcnews.com/business', + word_count_threshold=5, + chunking_strategy=FixedLengthWordChunking(chunk_size=100), + extraction_strategy=NoExtractionStrategy(), bypass_cache=True + ) + self.assertTrue(result.success, "Failed to crawl and extract with FixedLengthWordChunking and NoExtractionStrategy") + + def test_run_sliding_window_and_no_extraction(self): + result = self.crawler.run( + url='https://www.nbcnews.com/business', + word_count_threshold=5, + chunking_strategy=SlidingWindowChunking(window_size=100, step=50), + extraction_strategy=NoExtractionStrategy(), bypass_cache=True + ) + self.assertTrue(result.success, "Failed to crawl and extract with SlidingWindowChunking and NoExtractionStrategy") + +if __name__ == '__main__': + 
unittest.main()