diff --git a/CHANGELOG.md b/CHANGELOG.md index 57bb8614..08705a20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,26 @@ # Changelog -## [0.2.71] 2024-06-26 -β€’ Refactored `crawler_strategy.py` to handle exceptions and improve error messages -β€’ Improved `get_content_of_website_optimized` function in `utils.py` for better performance -β€’ Updated `utils.py` with latest changes -β€’ Migrated to `ChromeDriverManager` for resolving Chrome driver download issues +## [v0.2.72] - 2024-06-30 + +This release brings exciting updates and improvements to our project! πŸŽ‰ + +* πŸ“š **Documentation Updates**: Our documentation has been revamped to reflect the latest changes and additions. +* πŸš€ **New Modes in setup.py**: We've added support for three new modes in setup.py: default, torch, and transformers. This enhances the project's flexibility and usability. +* 🐳 **Docker File Updates**: The Docker file has been updated to ensure seamless compatibility with the new modes and improvements. +* πŸ•·οΈ **Temporary Solution for Headless Crawling**: We've implemented a temporary solution to overcome issues with crawling websites in headless mode. + +These changes aim to improve the overall user experience, provide more flexibility, and enhance the project's performance. We're thrilled to share these updates with you and look forward to continuing to evolve and improve our project! + +## [0.2.71] - 2024-06-26 + +**Improved Error Handling and Performance** 🚧 + +* 🚫 Refactored `crawler_strategy.py` to handle exceptions and provide better error messages, making it more robust and reliable. +* πŸ’» Optimized the `get_content_of_website_optimized` function in `utils.py` for improved performance, reducing potential bottlenecks. +* πŸ’» Updated `utils.py` with the latest changes, ensuring consistency and accuracy. +* 🚫 Migrated to `ChromeDriverManager` to resolve Chrome driver download issues, providing a smoother user experience. 
+ +These changes focus on refining the existing codebase, resulting in a more stable, efficient, and user-friendly experience. With these improvements, you can expect fewer errors and better performance in the crawler strategy and utility functions. ## [0.2.71] - 2024-06-25 ### Fixed diff --git a/Dockerfile b/Dockerfile index 2131ccf7..07c41ad7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,12 +18,11 @@ RUN apt-get update && \ software-properties-common && \ rm -rf /var/lib/apt/lists/* -# Install Python dependencies -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt && \ - pip install --no-cache-dir spacy torch onnxruntime uvicorn && \ - python -m spacy download en_core_web_sm - # pip install --no-cache-dir spacy torch torchvision torchaudio onnxruntime uvicorn && \ +# Copy the application code +COPY . . + +# Install Crawl4AI using the local setup.py (which will use the default installation) +RUN pip install --no-cache-dir . # Install Google Chrome and ChromeDriver RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - && \ @@ -33,9 +32,6 @@ RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key wget -O /tmp/chromedriver.zip http://chromedriver.storage.googleapis.com/`curl -sS chromedriver.storage.googleapis.com/LATEST_RELEASE`/chromedriver_linux64.zip && \ unzip /tmp/chromedriver.zip chromedriver -d /usr/local/bin/ -# Copy the rest of the application code -COPY . . 
- # Set environment to use Chrome and ChromeDriver properly ENV CHROME_BIN=/usr/bin/google-chrome \ CHROMEDRIVER=/usr/local/bin/chromedriver \ @@ -43,9 +39,6 @@ ENV CHROME_BIN=/usr/bin/google-chrome \ DBUS_SESSION_BUS_ADDRESS=/dev/null \ PYTHONUNBUFFERED=1 -# pip install -e .[all] -RUN pip install --no-cache-dir -e .[all] - # Ensure the PATH environment variable includes the location of the installed packages ENV PATH /opt/conda/bin:$PATH @@ -53,15 +46,13 @@ ENV PATH /opt/conda/bin:$PATH EXPOSE 80 # Download models call cli "crawl4ai-download-models" -RUN crawl4ai-download-models +# RUN crawl4ai-download-models -# Instakk mkdocs +# Install mkdocs RUN pip install mkdocs mkdocs-terminal # Call mkdocs to build the documentation RUN mkdocs build # Run uvicorn -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"] - - +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"] \ No newline at end of file diff --git a/README.md b/README.md index f910c829..b4d2c7c5 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.71 πŸ•·οΈπŸ€– +# Crawl4AI v0.2.72 πŸ•·οΈπŸ€– [![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers) [![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members) @@ -52,6 +52,13 @@ result = crawler.run(url="https://www.nbcnews.com/business") print(result.markdown) ``` +## How to install πŸ›  +```bash +virtualenv venv +source venv/bin/activate +pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git" +``` + ### Speed-First Design πŸš€ Perhaps the most important design principle for this library is speed. We need to ensure it can handle many links and resources in parallel as quickly as possible. By combining this speed with fast LLMs like Groq, the results will be truly amazing. 
diff --git a/crawl4ai/crawler_strategy.py b/crawl4ai/crawler_strategy.py index 4f6190c9..cd94e9e7 100644 --- a/crawl4ai/crawler_strategy.py +++ b/crawl4ai/crawler_strategy.py @@ -83,14 +83,20 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): if kwargs.get("user_agent"): self.options.add_argument("--user-agent=" + kwargs.get("user_agent")) else: - # Set user agent user_agent = kwargs.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36") - self.options.add_argument(f"--user-agent={user_agent}") - - self.options.add_argument("--no-sandbox") + self.options.add_argument(f"--user-agent={user_agent}") + self.options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36") + self.options.headless = kwargs.get("headless", True) if self.options.headless: self.options.add_argument("--headless") + + self.options.add_argument("--disable-gpu") + self.options.add_argument("--window-size=1920,1080") + self.options.add_argument("--no-sandbox") + self.options.add_argument("--disable-dev-shm-usage") + self.options.add_argument("--disable-blink-features=AutomationControlled") + # self.options.add_argument("--disable-dev-shm-usage") self.options.add_argument("--disable-gpu") # self.options.add_argument("--disable-extensions") @@ -187,10 +193,24 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): self.driver = self.execute_hook('before_get_url', self.driver) if self.verbose: print(f"[LOG] πŸ•ΈοΈ Crawling {url} using LocalSeleniumCrawlerStrategy...") - self.driver.get(url) + self.driver.get(url) # + html = self.driver.page_source WebDriverWait(self.driver, 10).until( - EC.presence_of_all_elements_located((By.TAG_NAME, "html")) + EC.presence_of_all_elements_located((By.TAG_NAME, "body")) ) + can_not_be_done_headless = False # Look at my creativity for naming variables + # TODO: Very ugly way for now but it works 
+ if html == "": + can_not_be_done_headless = True + options = Options() + options.headless = False + # set window size very small + options.add_argument("--window-size=10,10") + driver = webdriver.Chrome(service=self.service, options=options) + driver.get(url) + html = driver.page_source + driver.quit() + self.driver = self.execute_hook('after_get_url', self.driver) # Execute JS code if provided @@ -207,7 +227,8 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): lambda driver: driver.execute_script("return document.readyState") == "complete" ) - html = self.driver.page_source + if not can_not_be_done_headless: + html = self.driver.page_source self.driver = self.execute_hook('before_return_html', self.driver, html) # Store in cache diff --git a/crawl4ai/extraction_strategy.py b/crawl4ai/extraction_strategy.py index aadcda20..f635f60b 100644 --- a/crawl4ai/extraction_strategy.py +++ b/crawl4ai/extraction_strategy.py @@ -10,7 +10,7 @@ from functools import partial from .model_loader import * import math -import numpy as np + class ExtractionStrategy(ABC): """ Abstract base class for all extraction strategies. @@ -219,6 +219,8 @@ class CosineStrategy(ExtractionStrategy): """ super().__init__() + import numpy as np + self.semantic_filter = semantic_filter self.word_count_threshold = word_count_threshold self.max_dist = max_dist diff --git a/docs/md/changelog.md b/docs/md/changelog.md index 6f9ac706..8a2e929e 100644 --- a/docs/md/changelog.md +++ b/docs/md/changelog.md @@ -1,10 +1,26 @@ # Changelog -## [0.2.71] 2024-06-26 -β€’ Refactored `crawler_strategy.py` to handle exceptions and improve error messages -β€’ Improved `get_content_of_website_optimized` function in `utils.py` for better performance -β€’ Updated `utils.py` with latest changes -β€’ Migrated to `ChromeDriverManager` for resolving Chrome driver download issues +## [v0.2.72] - 2024-06-30 + +This release brings exciting updates and improvements to our project! 
πŸŽ‰ + +* πŸ“š **Documentation Updates**: Our documentation has been revamped to reflect the latest changes and additions. +* πŸš€ **New Modes in setup.py**: We've added support for three new modes in setup.py: default, torch, and transformers. This enhances the project's flexibility and usability. +* 🐳 **Docker File Updates**: The Docker file has been updated to ensure seamless compatibility with the new modes and improvements. +* πŸ•·οΈ **Temporary Solution for Headless Crawling**: We've implemented a temporary solution to overcome issues with crawling websites in headless mode. + +These changes aim to improve the overall user experience, provide more flexibility, and enhance the project's performance. We're thrilled to share these updates with you and look forward to continuing to evolve and improve our project! + +## [0.2.71] - 2024-06-26 + +**Improved Error Handling and Performance** 🚧 + +* 🚫 Refactored `crawler_strategy.py` to handle exceptions and provide better error messages, making it more robust and reliable. +* πŸ’» Optimized the `get_content_of_website_optimized` function in `utils.py` for improved performance, reducing potential bottlenecks. +* πŸ’» Updated `utils.py` with the latest changes, ensuring consistency and accuracy. +* 🚫 Migrated to `ChromeDriverManager` to resolve Chrome driver download issues, providing a smoother user experience. + +These changes focus on refining the existing codebase, resulting in a more stable, efficient, and user-friendly experience. With these improvements, you can expect fewer errors and better performance in the crawler strategy and utility functions. ## [0.2.71] - 2024-06-25 ### Fixed diff --git a/docs/md/index.md b/docs/md/index.md index f9c25a42..21fcdeb0 100644 --- a/docs/md/index.md +++ b/docs/md/index.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.71 +# Crawl4AI v0.2.72 Welcome to the official documentation for Crawl4AI! 
πŸ•·οΈπŸ€– Crawl4AI is an open-source Python library designed to simplify web crawling and extract useful information from web pages. This documentation will guide you through the features, usage, and customization of Crawl4AI. diff --git a/docs/md/installation.md b/docs/md/installation.md index 7e705b7b..f733856a 100644 --- a/docs/md/installation.md +++ b/docs/md/installation.md @@ -7,33 +7,60 @@ There are three ways to use Crawl4AI: ## Library Installation -To install Crawl4AI as a library, follow these steps: +Crawl4AI offers flexible installation options to suit various use cases. Choose the option that best fits your needs: -1. Install the package from GitHub: +- **Default Installation** (Basic functionality): +```bash +virtualenv venv +source venv/bin/activate +pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git" ``` +Use this for basic web crawling and scraping tasks. + +- **Installation with PyTorch** (For advanced text clustering): +```bash +virtualenv venv +source venv/bin/activate +pip install "crawl4ai[torch] @ git+https://github.com/unclecode/crawl4ai.git" +``` +Choose this if you need the CosineSimilarity cluster strategy. + +- **Installation with Transformers** (For summarization and Hugging Face models): +```bash +virtualenv venv +source venv/bin/activate +pip install "crawl4ai[transformer] @ git+https://github.com/unclecode/crawl4ai.git" +``` +Opt for this if you require text summarization or plan to use Hugging Face models. + +- **Full Installation** (All features): +```bash virtualenv venv source venv/bin/activate pip install "crawl4ai[all] @ git+https://github.com/unclecode/crawl4ai.git" ``` +This installs all dependencies for full functionality. -πŸ’‘ Better to run the following CLI-command to load the required models. This is optional, but it will boost the performance and speed of the crawler. You need to do this only once. -``` -crawl4ai-download-models -``` - -2. 
Alternatively, you can clone the repository and install the package locally: -``` +- **Development Installation** (For contributors): +```bash virtualenv venv source venv/bin/activate git clone https://github.com/unclecode/crawl4ai.git cd crawl4ai -pip install -e .[all] +pip install -e ".[all]" +``` +Use this if you plan to modify the source code. + +πŸ’‘ After installation, if you have used "torch", "transformer" or "all", it's recommended to run the following CLI command to load the required models. This is optional but will boost the performance and speed of the crawler. You only need to do this once, and it applies only when you install with one of the optional extras ([torch], [transformer], or [all]). +```bash +crawl4ai-download-models +``` ## Using Docker for Local Server -3. Use Docker to run the local server: -``` +To run Crawl4AI as a local server using Docker: + +```bash # For Mac users # docker build --platform linux/amd64 -t crawl4ai . # For other users @@ -43,4 +70,4 @@ docker run -d -p 8000:80 crawl4ai ## Using Google Colab -You can also use Crawl4AI in a Google Colab notebook for easy setup and experimentation. 
Simply open the following Colab notebook and follow the instructions: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1wz8u30rvbq6Scodye9AGCw8Qg_Z8QGsk) \ No newline at end of file diff --git a/setup.py b/setup.py index a11abc2e..cfbeaddf 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,5 @@ from setuptools import setup, find_packages import os -import sys from pathlib import Path import subprocess from setuptools.command.install import install @@ -14,16 +13,10 @@ os.makedirs(f"{crawl4ai_folder}/cache", exist_ok=True) with open("requirements.txt") as f: requirements = f.read().splitlines() -# Read the requirements from requirements.txt -with open("requirements.crawl.txt") as f: - requirements_crawl_only = f.read().splitlines() - # Define the requirements for different environments -requirements_without_torch = [req for req in requirements if not req.startswith("torch")] -requirements_without_transformers = [req for req in requirements if not req.startswith("transformers")] -requirements_without_nltk = [req for req in requirements if not req.startswith("nltk")] -requirements_without_torch_transformers_nlkt = [req for req in requirements if not req.startswith("torch") and not req.startswith("transformers") and not req.startswith("nltk")] -requirements_crawl_only = [req for req in requirements if not req.startswith("torch") and not req.startswith("transformers") and not req.startswith("nltk")] +default_requirements = [req for req in requirements if not req.startswith(("torch", "transformers", "onnxruntime", "nltk", "spacy", "tokenizers", "scikit-learn", "numpy"))] +torch_requirements = [req for req in requirements if req.startswith(("torch", "nltk", "spacy", "scikit-learn", "numpy"))] +transformer_requirements = [req for req in requirements if req.startswith(("transformers", "tokenizers", "onnxruntime"))] class CustomInstallCommand(install): """Customized setuptools install command to install spacy 
without dependencies.""" @@ -33,7 +26,7 @@ class CustomInstallCommand(install): setup( name="Crawl4AI", - version="0.2.71", + version="0.2.72", description="πŸ”₯πŸ•·οΈ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper", long_description=open("README.md").read(), long_description_content_type="text/markdown", @@ -42,11 +35,11 @@ setup( author_email="unclecode@kidocode.com", license="MIT", packages=find_packages(), - install_requires=requirements_without_torch_transformers_nlkt, + install_requires=default_requirements, extras_require={ - "all": requirements, # Include all requirements - "colab": requirements_without_torch, # Exclude torch for Colab - "crawl": requirements_crawl_only, # Include only crawl requirements + "torch": torch_requirements, + "transformer": transformer_requirements, + "all": requirements, }, cmdclass={ 'install': CustomInstallCommand, @@ -67,4 +60,4 @@ setup( "Programming Language :: Python :: 3.10", ], python_requires=">=3.7", -) +) \ No newline at end of file