diff --git a/.gitignore b/.gitignore index d91cb941..4dba4f26 100644 --- a/.gitignore +++ b/.gitignore @@ -165,6 +165,8 @@ Crawl4AI.egg-info/ Crawl4AI.egg-info/* crawler_data.db .vscode/ +.tests/ +.test_pads/ test_pad.py test_pad*.py .data/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 3db7d01b..73f6ef69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## [v0.2.76] - 2024-08-02 + +Major improvements in functionality, performance, and cross-platform compatibility! 🚀 + +- 🐳 **Docker enhancements**: Significantly improved Dockerfile for easy installation on Linux, Mac, and Windows. +- 🌐 **Official Docker Hub image**: Launched our first official image on Docker Hub for streamlined deployment. +- 🔧 **Selenium upgrade**: Removed dependency on ChromeDriver, now using Selenium's built-in capabilities for better compatibility. +- 🖼️ **Image description**: Implemented ability to generate textual descriptions for extracted images from web pages. +- ⚡ **Performance boost**: Various improvements to enhance overall speed and performance. + +A big shoutout to our amazing community contributors: +- [@aravindkarnam](https://github.com/aravindkarnam) for developing the textual description extraction feature. +- [@FractalMind](https://github.com/FractalMind) for creating the first official Docker Hub image and fixing Dockerfile errors. +- [@ketonkss4](https://github.com/ketonkss4) for identifying Selenium's new capabilities, helping us reduce dependencies. + +Your contributions are driving Crawl4AI forward! 🙌 + +## [v0.2.75] - 2024-07-19 + +Minor improvements for a more maintainable codebase: + +- 🔄 Fixed typos in `chunking_strategy.py` and `crawler_strategy.py` to improve code readability +- 🔄 Added `.test_pads/` directory to `.gitignore` to keep our repository clean and organized + +These changes may seem small, but they contribute to a more stable and sustainable codebase. 
By fixing typos and updating our `.gitignore` settings, we're ensuring that our code is easier to maintain and scale in the long run. + ## [v0.2.74] - 2024-07-08 A slew of exciting updates to improve the crawler's stability and robustness! 🎉 diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 00000000..0e45ca85 --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,31 @@ +# Contributors to Crawl4AI + +We would like to thank the following people for their contributions to Crawl4AI: + +## Core Team + +- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer +- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer + +## Community Contributors + +- [Aravind Karnam](https://github.com/aravindkarnam) - Developed textual description extraction feature +- [FractalMind](https://github.com/FractalMind) - Created the first official Docker Hub image and fixed Dockerfile errors +- [ketonkss4](https://github.com/ketonkss4) - Identified Selenium's new capabilities, helping reduce dependencies + +## Other Contributors + +- [Gokhan](https://github.com/gkhngyk) +- [Shiv Kumar](https://github.com/shivkumar0757) +- [QIN2DIM](https://github.com/QIN2DIM) + + +## Acknowledgements + +We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better. + +--- + +If you've contributed to Crawl4AI and your name isn't on this list, please [open a pull request](https://github.com/unclecode/crawl4ai/pulls) with your name, link, and contribution, and we'll review it promptly. + +Thank you all for your contributions! 
\ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 07c41ad7..3f74a26a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,9 @@ FROM python:3.10-slim-bookworm # Set the working directory in the container WORKDIR /usr/src/app +# Define build arguments +ARG INSTALL_OPTION=default + # Install build dependencies RUN apt-get update && \ apt-get install -y --no-install-recommends \ @@ -21,33 +24,39 @@ RUN apt-get update && \ # Copy the application code COPY . . -# Install Crawl4AI using the local setup.py (which will use the default installation) -RUN pip install --no-cache-dir . +# Install Crawl4AI using the local setup.py with the specified option +# and download models only for torch, transformer, or all options +RUN if [ "$INSTALL_OPTION" = "all" ]; then \ + pip install --no-cache-dir .[all] && \ + crawl4ai-download-models; \ + elif [ "$INSTALL_OPTION" = "torch" ]; then \ + pip install --no-cache-dir .[torch] && \ + crawl4ai-download-models; \ + elif [ "$INSTALL_OPTION" = "transformer" ]; then \ + pip install --no-cache-dir .[transformer] && \ + crawl4ai-download-models; \ + else \ + pip install --no-cache-dir .; \ + fi -# Install Google Chrome and ChromeDriver +# Install Google Chrome RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list' && \ apt-get update && \ - apt-get install -y google-chrome-stable && \ - wget -O /tmp/chromedriver.zip http://chromedriver.storage.googleapis.com/`curl -sS chromedriver.storage.googleapis.com/LATEST_RELEASE`/chromedriver_linux64.zip && \ - unzip /tmp/chromedriver.zip chromedriver -d /usr/local/bin/ + apt-get install -y google-chrome-stable -# Set environment to use Chrome and ChromeDriver properly +# Set environment to use Chrome properly ENV CHROME_BIN=/usr/bin/google-chrome \ - CHROMEDRIVER=/usr/local/bin/chromedriver \ DISPLAY=:99 \ 
DBUS_SESSION_BUS_ADDRESS=/dev/null \ PYTHONUNBUFFERED=1 # Ensure the PATH environment variable includes the location of the installed packages -ENV PATH /opt/conda/bin:$PATH +ENV PATH=/opt/conda/bin:$PATH # Make port 80 available to the world outside this container EXPOSE 80 -# Download models call cli "crawl4ai-download-models" -# RUN crawl4ai-download-models - # Install mkdocs RUN pip install mkdocs mkdocs-terminal diff --git a/README.md b/README.md index a2e784b3..c9552984 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.74 🕷️🤖 +# Crawl4AI v0.2.76 🕷️🤖 [![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers) [![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members) @@ -8,13 +8,29 @@ Crawl4AI simplifies web crawling and data extraction, making it accessible for large language models (LLMs) and AI applications. 🆓🌐 +#### [v0.2.76] - 2024-08-02 + +Major improvements in functionality, performance, and cross-platform compatibility! 🚀 + +- 🐳 **Docker enhancements**: + - Significantly improved Dockerfile for easy installation on Linux, Mac, and Windows. +- 🌐 **Official Docker Hub image**: + - Launched our first official image on Docker Hub for streamlined deployment (unclecode/crawl4ai). +- 🔧 **Selenium upgrade**: + - Removed dependency on ChromeDriver, now using Selenium's built-in capabilities for better compatibility. +- 🖼️ **Image description**: + - Implemented ability to generate textual descriptions for extracted images from web pages. +- ⚡ **Performance boost**: + - Various improvements to enhance overall speed and performance. + ## Try it Now! -- Use as REST API: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1zODYjhemJ5bUmYceWpVoBMVpd0ofzNBZ?usp=sharing) -- Use as Python library: This collab is a bit outdated. 
I'm updating it with the newest versions, so please refer to the website for the latest documentation. This will be updated in a few days, and you'll have the latest version here. Thank you so much. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz8u30rvbq6Scodye9AGCw8Qg_Z8QGsk) +✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1sJPAmeLj5PMrg2VgOwMJ2ubGIcK0cJeX?usp=sharing) ✨ visit our [Documentation Website](https://crawl4ai.com/mkdocs/) +✨ Check [Demo](https://crawl4ai.com/mkdocs/demo) + ## Features ✨ - 🆓 Completely free and open-source @@ -32,6 +48,18 @@ Crawl4AI simplifies web crawling and data extraction, making it accessible for l - 🎯 CSS selector support - 📝 Passes instructions/keywords to refine extraction +# Crawl4AI + +## 🌟 Shoutout to Contributors of v0.2.76! + +A big thank you to the amazing contributors who've made this release possible: + +- [@aravindkarnam](https://github.com/aravindkarnam) for the new image description feature +- [@FractalMind](https://github.com/FractalMind) for our official Docker Hub image +- [@ketonkss4](https://github.com/ketonkss4) for helping streamline our Selenium setup + +Your contributions are driving Crawl4AI forward! 🚀 + ## Cool Examples 🚀 ### Quick Start @@ -52,14 +80,33 @@ result = crawler.run(url="https://www.nbcnews.com/business") print(result.markdown) ``` -## How to install 🛠 +## How to install 🛠 + +### Using pip 🐍 ```bash virtualenv venv source venv/bin/activate pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git" -```️ +``` -### Speed-First Design 🚀 +### Using Docker 🐳 + +```bash +# For Mac users (M1/M2) +# docker build --platform linux/amd64 -t crawl4ai . +docker build -t crawl4ai . 
+docker run -d -p 8000:80 crawl4ai +``` + +### Using Docker Hub 🐳 + +```bash +docker pull unclecode/crawl4ai:latest +docker run -d -p 8000:80 unclecode/crawl4ai:latest +``` + + +## Speed-First Design 🚀 Perhaps the most important design principle for this library is speed. We need to ensure it can handle many links and resources in parallel as quickly as possible. By combining this speed with fast LLMs like Groq, the results will be truly amazing. diff --git a/crawl4ai/chunking_strategy.py b/crawl4ai/chunking_strategy.py index 59006072..d16e4f48 100644 --- a/crawl4ai/chunking_strategy.py +++ b/crawl4ai/chunking_strategy.py @@ -55,7 +55,7 @@ class TopicSegmentationChunking(ChunkingStrategy): def __init__(self, num_keywords=3, **kwargs): import nltk as nl - self.tokenizer = nl.toknize.TextTilingTokenizer() + self.tokenizer = nl.tokenize.TextTilingTokenizer() self.num_keywords = num_keywords def chunk(self, text: str) -> list: diff --git a/crawl4ai/config.py b/crawl4ai/config.py index 77273b78..00b1eb46 100644 --- a/crawl4ai/config.py +++ b/crawl4ai/config.py @@ -27,3 +27,14 @@ WORD_TOKEN_RATE = 1.3 # Threshold for the minimum number of word in a HTML tag to be considered MIN_WORD_THRESHOLD = 1 +IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1 + +# Threshold for the Image extraction - Range is 1 to 6 +# Images are scored based on point based system, to filter based on usefulness. Points are assigned +# to each image based on the following aspects. 
+# If either height or width exceeds 150px +# If image size is greater than 10Kb +# If alt property is set +# If image format is in jpg, png or webp +# If image is in the first half of the total images extracted from the page +IMAGE_SCORE_THRESHOLD = 2 diff --git a/crawl4ai/crawler_strategy.py b/crawl4ai/crawler_strategy.py index 85ba4450..fb7980d3 100644 --- a/crawl4ai/crawler_strategy.py +++ b/crawl4ai/crawler_strategy.py @@ -6,9 +6,9 @@ from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.chrome.options import Options from selenium.common.exceptions import InvalidArgumentException, WebDriverException -from selenium.webdriver.chrome.service import Service as ChromeService -from webdriver_manager.chrome import ChromeDriverManager -from urllib3.exceptions import MaxRetryError +# from selenium.webdriver.chrome.service import Service as ChromeService +# from webdriver_manager.chrome import ChromeDriverManager +# from urllib3.exceptions import MaxRetryError from .config import * import logging, time @@ -137,10 +137,15 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): # self.service = Service(chromedriver_autoinstaller.install()) - chromedriver_path = ChromeDriverManager().install() - self.service = Service(chromedriver_path) - self.service.log_path = "NUL" - self.driver = webdriver.Chrome(service=self.service, options=self.options) + # chromedriver_path = ChromeDriverManager().install() + # self.service = Service(chromedriver_path) + # self.service.log_path = "NUL" + # self.driver = webdriver.Chrome(service=self.service, options=self.options) + + # Use selenium-manager (built into Selenium 4.10.0+) + self.service = Service() + self.driver = webdriver.Chrome(options=self.options) + self.driver = self.execute_hook('on_driver_created', self.driver) if kwargs.get("cookies"): @@ -292,16 +297,18 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): # Open the screenshot with 
PIL image = Image.open(BytesIO(screenshot)) + # Convert image to RGB mode (this will handle both RGB and RGBA images) + rgb_image = image.convert('RGB') + # Convert to JPEG and compress buffered = BytesIO() - image.save(buffered, format="JPEG", quality=85) + rgb_image.save(buffered, format="JPEG", quality=85) img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8') if self.verbose: print(f"[LOG] 📸 Screenshot taken and converted to base64") return img_base64 - except Exception as e: error_message = sanitize_input_encode(f"Failed to take screenshot: {str(e)}") print(error_message) @@ -314,7 +321,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): try: font = ImageFont.truetype("arial.ttf", 40) except IOError: - font = ImageFont.load_default(size=40) + font = ImageFont.load_default() # Define text color and wrap the text text_color = (255, 255, 255) @@ -333,6 +340,6 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8') return img_base64 - + def quit(self): - self.driver.quit() \ No newline at end of file + self.driver.quit() diff --git a/crawl4ai/model_loader.py b/crawl4ai/model_loader.py index 7e17f7f9..7b22f0e1 100644 --- a/crawl4ai/model_loader.py +++ b/crawl4ai/model_loader.py @@ -3,7 +3,7 @@ from pathlib import Path import subprocess, os import shutil import tarfile -from crawl4ai.config import MODEL_REPO_BRANCH +from .model_loader import * import argparse import urllib.request __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) diff --git a/crawl4ai/utils.py b/crawl4ai/utils.py index e7b59d65..07832888 100644 --- a/crawl4ai/utils.py +++ b/crawl4ai/utils.py @@ -11,6 +11,9 @@ from .prompts import PROMPT_EXTRACT_BLOCKS from .config import * from pathlib import Path from typing import Dict, Any +from urllib.parse import urljoin +import requests +from requests.exceptions import InvalidSchema class InvalidCSSSelectorError(Exception): pass @@ -435,6 
+438,8 @@ def get_content_of_website_optimized(url: str, html: str, word_count_threshold: soup = BeautifulSoup(html, 'html.parser') body = soup.body + + image_description_min_word_threshold = kwargs.get('image_description_min_word_threshold', IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD) if css_selector: selected_elements = body.select(css_selector) @@ -447,6 +452,103 @@ def get_content_of_website_optimized(url: str, html: str, word_count_threshold: links = {'internal': [], 'external': []} media = {'images': [], 'videos': [], 'audios': []} + def process_image(img, url, index, total_images): + #Check if an image has valid display and inside undesired html elements + def is_valid_image(img, parent, parent_classes): + style = img.get('style', '') + src = img.get('src', '') + classes_to_check = ['button', 'icon', 'logo'] + tags_to_check = ['button', 'input'] + return all([ + 'display:none' not in style, + src, + not any(s in var for var in [src, img.get('alt', ''), *parent_classes] for s in classes_to_check), + parent.name not in tags_to_check + ]) + + #Score an image for it's usefulness + def score_image_for_usefulness(img, base_url, index, images_count): + # Function to parse image height/width value and units + def parse_dimension(dimension): + if dimension: + match = re.match(r"(\d+)(\D*)", dimension) + if match: + number = int(match.group(1)) + unit = match.group(2) or 'px' # Default unit is 'px' if not specified + return number, unit + return None, None + + # Fetch image file metadata to extract size and extension + def fetch_image_file_size(img, base_url): + #If src is relative path construct full URL, if not it may be CDN URL + img_url = urljoin(base_url,img.get('src')) + try: + response = requests.head(img_url) + if response.status_code == 200: + return response.headers.get('Content-Length',None) + else: + print(f"Failed to retrieve file size for {img_url}") + return None + except InvalidSchema as e: + return None + finally: + return + + image_height = 
img.get('height') + height_value, height_unit = parse_dimension(image_height) + image_width = img.get('width') + width_value, width_unit = parse_dimension(image_width) + image_size = 0 #int(fetch_image_file_size(img,base_url) or 0) + image_format = os.path.splitext(img.get('src',''))[1].lower() + # Remove . from format + image_format = image_format.strip('.') + score = 0 + if height_value: + if height_unit == 'px' and height_value > 150: + score += 1 + if height_unit in ['%','vh','vmin','vmax'] and height_value >30: + score += 1 + if width_value: + if width_unit == 'px' and width_value > 150: + score += 1 + if width_unit in ['%','vh','vmin','vmax'] and width_value >30: + score += 1 + if image_size > 10000: + score += 1 + if img.get('alt') != '': + score+=1 + if any(image_format==format for format in ['jpg','png','webp']): + score+=1 + if index/images_count<0.5: + score+=1 + return score + + # Extract meaningful text for images from closest parent + def find_closest_parent_with_useful_text(tag): + current_tag = tag + while current_tag: + current_tag = current_tag.parent + # Get the text content of the parent tag + if current_tag: + text_content = current_tag.get_text(separator=' ',strip=True) + # Check if the text content has at least word_count_threshold + if len(text_content.split()) >= image_description_min_word_threshold: + return text_content + return None + + if not is_valid_image(img, img.parent, img.parent.get('class', [])): + return None + score = score_image_for_usefulness(img, url, index, total_images) + if score <= IMAGE_SCORE_THRESHOLD: + return None + return { + 'src': img.get('src', ''), + 'alt': img.get('alt', ''), + 'desc': find_closest_parent_with_useful_text(img), + 'score': score, + 'type': 'image' + } + def process_element(element: element.PageElement) -> bool: try: if isinstance(element, NavigableString): @@ -471,11 +573,6 @@ def get_content_of_website_optimized(url: str, html: str, word_count_threshold: keep_element = True elif element.name == 
'img': - media['images'].append({ - 'src': element.get('src'), - 'alt': element.get('alt'), - 'type': 'image' - }) return True # Always keep image elements elif element.name in ['video', 'audio']: @@ -518,6 +615,14 @@ def get_content_of_website_optimized(url: str, html: str, word_count_threshold: print('Error processing element:', str(e)) return False + #process images by filtering and extracting contextual text from the page + imgs = body.find_all('img') + media['images'] = [ + result for result in + (process_image(img, url, i, len(imgs)) for i, img in enumerate(imgs)) + if result is not None + ] + process_element(body) def flatten_nested_elements(node): diff --git a/docs/examples/llm_extraction_openai_pricing.py b/docs/examples/llm_extraction_openai_pricing.py index 9330ad31..d05a1b6b 100644 --- a/docs/examples/llm_extraction_openai_pricing.py +++ b/docs/examples/llm_extraction_openai_pricing.py @@ -21,7 +21,8 @@ result = crawler.run( url=url, word_count_threshold=1, extraction_strategy= LLMExtractionStrategy( - provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'), + # provider= "openai/gpt-4o", api_token = os.getenv('OPENAI_API_KEY'), + provider= "groq/llama-3.1-70b-versatile", api_token = os.getenv('GROQ_API_KEY'), schema=OpenAIModelFee.model_json_schema(), extraction_type="schema", instruction="From the crawled content, extract all mentioned model names along with their "\ diff --git a/docs/md/changelog.md b/docs/md/changelog.md index b0eb7c0d..80e3ce8e 100644 --- a/docs/md/changelog.md +++ b/docs/md/changelog.md @@ -1,5 +1,32 @@ # Changelog +## [v0.2.76] - 2024-08-02 + +Major improvements in functionality, performance, and cross-platform compatibility! 🚀 + +- 🐳 **Docker enhancements**: Significantly improved Dockerfile for easy installation on Linux, Mac, and Windows. +- 🌐 **Official Docker Hub image**: Launched our first official image on Docker Hub for streamlined deployment. 
+- 🔧 **Selenium upgrade**: Removed dependency on ChromeDriver, now using Selenium's built-in capabilities for better compatibility. +- 🖼️ **Image description**: Implemented ability to generate textual descriptions for extracted images from web pages. +- ⚡ **Performance boost**: Various improvements to enhance overall speed and performance. + +A big shoutout to our amazing community contributors: +- [@aravindkarnam](https://github.com/aravindkarnam) for developing the textual description extraction feature. +- [@FractalMind](https://github.com/FractalMind) for creating the first official Docker Hub image and fixing Dockerfile errors. +- [@ketonkss4](https://github.com/ketonkss4) for identifying Selenium's new capabilities, helping us reduce dependencies. + +Your contributions are driving Crawl4AI forward! 🙌 + +## [v0.2.75] - 2024-07-19 + +Minor improvements for a more maintainable codebase: + +- 🔄 Fixed typos in `chunking_strategy.py` and `crawler_strategy.py` to improve code readability +- 🔄 Added `.test_pads/` directory to `.gitignore` to keep our repository clean and organized + +These changes may seem small, but they contribute to a more stable and sustainable codebase. By fixing typos and updating our `.gitignore` settings, we're ensuring that our code is easier to maintain and scale in the long run. + + ## v0.2.74 - 2024-07-08 A slew of exciting updates to improve the crawler's stability and robustness! 🎉 diff --git a/docs/md/index.md b/docs/md/index.md index b483234f..8c4abb48 100644 --- a/docs/md/index.md +++ b/docs/md/index.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.74 +# Crawl4AI v0.2.76 Welcome to the official documentation for Crawl4AI! 🕷️🤖 Crawl4AI is an open-source Python library designed to simplify web crawling and extract useful information from web pages. This documentation will guide you through the features, usage, and customization of Crawl4AI. 
diff --git a/docs/md/installation.md b/docs/md/installation.md index 1f26ed55..8ff0ba1f 100644 --- a/docs/md/installation.md +++ b/docs/md/installation.md @@ -2,11 +2,13 @@ There are three ways to use Crawl4AI: -1. As a library (Recommended) -2. As a local server (Docker) or using the REST API -3. As a Google Colab notebook. +1. As a library (Recommended). +2. As a local server (Docker) or using the REST API. +3. As a local server (Docker) using the pre-built image from Docker Hub. -## Library Installation +## Option 1: Library Installation + +You can try this Colab for a quick start: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1sJPAmeLj5PMrg2VgOwMJ2ubGIcK0cJeX#scrollTo=g1RrmI4W_rPk) Crawl4AI offers flexible installation options to suit various use cases. Choose the option that best fits your needs: @@ -57,23 +59,135 @@ Use this if you plan to modify the source code. crawl4ai-download-models ``` -## Using Docker for Local Server +## Option 2: Using Docker for Local Server -To run Crawl4AI as a local server using Docker: +Crawl4AI can be run as a local server using Docker. The Dockerfile supports different installation options to cater to various use cases. Here's how you can build and run the Docker image: + +### Default Installation + +The default installation includes the basic Crawl4AI package without additional dependencies or pre-downloaded models. ```bash -# For Mac users -# docker build --platform linux/amd64 -t crawl4ai . +# For Mac users (M1/M2) +docker build --platform linux/amd64 -t crawl4ai . + # For other users -# docker build -t crawl4ai . +docker build -t crawl4ai . + +# Run the container docker run -d -p 8000:80 crawl4ai ``` -## Using Google Colab +### Full Installation (All Dependencies and Models) + +This option installs all dependencies and downloads the models. 
+ +```bash +# For Mac users (M1/M2) +docker build --platform linux/amd64 --build-arg INSTALL_OPTION=all -t crawl4ai:all . + +# For other users +docker build --build-arg INSTALL_OPTION=all -t crawl4ai:all . + +# Run the container +docker run -d -p 8000:80 crawl4ai:all +``` + +### Torch Installation + +This option installs torch-related dependencies and downloads the models. + +```bash +# For Mac users (M1/M2) +docker build --platform linux/amd64 --build-arg INSTALL_OPTION=torch -t crawl4ai:torch . + +# For other users +docker build --build-arg INSTALL_OPTION=torch -t crawl4ai:torch . + +# Run the container +docker run -d -p 8000:80 crawl4ai:torch +``` + +### Transformer Installation + +This option installs transformer-related dependencies and downloads the models. + +```bash +# For Mac users (M1/M2) +docker build --platform linux/amd64 --build-arg INSTALL_OPTION=transformer -t crawl4ai:transformer . + +# For other users +docker build --build-arg INSTALL_OPTION=transformer -t crawl4ai:transformer . + +# Run the container +docker run -d -p 8000:80 crawl4ai:transformer +``` + +### Notes + +- The `--platform linux/amd64` flag is necessary for Mac users with M1/M2 chips to ensure compatibility. +- The `-t` flag tags the image with a name (and optionally a tag in the 'name:tag' format). +- The `-d` flag runs the container in detached mode. +- The `-p 8000:80` flag maps port 8000 on the host to port 80 in the container. + +Choose the installation option that best suits your needs. The default installation is suitable for basic usage, while the other options provide additional capabilities for more advanced use cases. + +## Option 3: Using the Pre-built Image from Docker Hub + +You can use pre-built Crawl4AI images from Docker Hub, which are available for all platforms (Mac, Linux, Windows). 
We have official images as well as a community-contributed image (Thanks to https://github.com/FractalMind): + +### Default Installation + +```bash + +# Pull the image + +docker pull unclecode/crawl4ai:latest + +# Run the container + +docker run -d -p 8000:80 unclecode/crawl4ai:latest + +``` + +### Community-Contributed Image + +A stable version of Crawl4AI is also available, created and maintained by a community member: + +```bash + +# Pull the community-contributed image + +docker pull ryser007/crawl4ai:stable + +# Run the container + +docker run -d -p 8000:80 ryser007/crawl4ai:stable + +``` + +We'd like to express our gratitude to GitHub user [@FractalMind](https://github.com/FractalMind) for creating and maintaining this stable version of the Crawl4AI Docker image. Community contributions like this are invaluable to the project. -You can also use Crawl4AI in a Google Colab notebook for easy setup and experimentation. Simply open the following Colab notebook and follow the instructions: +### Testing the Installation - ⚠️ This collab is a bit outdated. I'm updating it with the newest versions, so please refer to the website for the latest documentation. This will be updated in a few days, and you'll have the latest version here. Thank you so much. 
+After running the container, you can test if it's working correctly: + +- On Mac and Linux: + + ```bash + + curl http://localhost:8000 + + ``` + +- On Windows (PowerShell): + + ```powershell + + Invoke-WebRequest -Uri http://localhost:8000 + + ``` + + Or open a web browser and navigate to http://localhost:8000 -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz8u30rvbq6Scodye9AGCw8Qg_Z8QGsk) \ No newline at end of file diff --git a/docs/md/introduction.md b/docs/md/introduction.md index d4a13081..6d1ad56b 100644 --- a/docs/md/introduction.md +++ b/docs/md/introduction.md @@ -20,18 +20,6 @@ Crawl4AI is designed to simplify the process of crawling web pages and extractin - **🎯 CSS Selector Support**: Extract specific content using CSS selectors. - **📝 Instruction/Keyword Refinement**: Pass instructions or keywords to refine the extraction process. -## Recent Changes (v0.2.5) 🌟 - -- **New Hooks**: Added six important hooks to the crawler: - - 🟢 `on_driver_created`: Called when the driver is ready for initializations. - - 🔵 `before_get_url`: Called right before Selenium fetches the URL. - - 🟣 `after_get_url`: Called after Selenium fetches the URL. - - 🟠 `before_return_html`: Called when the data is parsed and ready. - - 🟡 `on_user_agent_updated`: Called when the user changes the user agent, causing the driver to reinitialize. -- **New Example**: Added an example in [`quickstart.py`](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/quickstart.py) in the example folder under the docs. -- **Improved Semantic Context**: Maintaining the semantic context of inline tags (e.g., abbreviation, DEL, INS) for improved LLM-friendliness. -- **Dockerfile Update**: Updated Dockerfile to ensure compatibility across multiple platforms. - Check the [Changelog](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md) for more details. 
## Power and Simplicity of Crawl4AI 🚀 diff --git a/requirements.txt b/requirements.txt index ced41173..2574cf60 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,12 +12,13 @@ python-dotenv==1.0.1 requests==2.32.3 rich==13.7.1 scikit-learn==1.5.0 -selenium==4.21.0 +selenium==4.23.1 uvicorn==0.30.1 transformers==4.41.2 -chromedriver-autoinstaller==0.6.4 +# webdriver-manager==4.0.1 +# chromedriver-autoinstaller==0.6.4 torch==2.3.1 onnxruntime==1.18.0 tokenizers==0.19.1 pillow==10.3.0 -webdriver-manager==4.0.1 \ No newline at end of file +slowapi==0.1.9 \ No newline at end of file diff --git a/setup.py b/setup.py index 4a2c346d..5918c721 100644 --- a/setup.py +++ b/setup.py @@ -1,18 +1,18 @@ from setuptools import setup, find_packages import os from pathlib import Path -import subprocess -from setuptools.command.install import install +import shutil # Create the .crawl4ai folder in the user's home directory if it doesn't exist # If the folder already exists, remove the cache folder -crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai") -if os.path.exists(f"{crawl4ai_folder}/cache"): - subprocess.run(["rm", "-rf", f"{crawl4ai_folder}/cache"]) -os.makedirs(crawl4ai_folder, exist_ok=True) -os.makedirs(f"{crawl4ai_folder}/cache", exist_ok=True) +crawl4ai_folder = Path.home() / ".crawl4ai" +cache_folder = crawl4ai_folder / "cache" +if cache_folder.exists(): + shutil.rmtree(cache_folder) +crawl4ai_folder.mkdir(exist_ok=True) +cache_folder.mkdir(exist_ok=True) # Read the requirements from requirements.txt with open("requirements.txt") as f: @@ -25,7 +25,7 @@ transformer_requirements = [req for req in requirements if req.startswith(("tran setup( name="Crawl4AI", - version="0.2.74", + version="0.2.76", description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown",