From bf00c26a8385ce17c5fee0f6bca0d780f954ae29 Mon Sep 17 00:00:00 2001 From: Unclecode Date: Sat, 18 May 2024 09:16:52 +0000 Subject: [PATCH] chore: Update Dockerfile to install chromium-chromedriver and spacy library --- .gitignore | 2 ++ Dockerfile | 17 ++++++++--------- Dockerfile_mac | 44 ++++++++++++++++++++++++++++++++++++++++++++ README.md | 8 +++++--- main.py | 9 ++++++++- 5 files changed, 67 insertions(+), 13 deletions(-) create mode 100644 Dockerfile_mac diff --git a/.gitignore b/.gitignore index fd1fd196..9aac8182 100644 --- a/.gitignore +++ b/.gitignore @@ -172,3 +172,5 @@ Crawl4AI.egg-info/ requirements0.txt a.txt + +*.sh \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 0d08e17e..2c1e7927 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,9 +7,6 @@ WORKDIR /usr/src/app # Copy the current directory contents into the container at /usr/src/app COPY . . -# Install any needed packages specified in requirements.txt -RUN pip install --no-cache-dir -r requirements.txt - # Install dependencies for Chrome and ChromeDriver RUN apt-get update && apt-get install -y --no-install-recommends \ wget \ @@ -20,15 +17,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ apt-transport-https \ software-properties-common \ - && wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ - && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ + && mkdir -p /etc/apt/keyrings \ + && curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | gpg --dearmor -o /etc/apt/keyrings/google-linux-signing-keyring.gpg \ + && echo 'deb [arch=amd64 signed-by=/etc/apt/keyrings/google-linux-signing-keyring.gpg] http://dl.google.com/linux/chrome/deb/ stable main' | tee /etc/apt/sources.list.d/google-chrome.list \ && apt-get update \ && apt-get install -y google-chrome-stable \ && rm -rf /var/lib/apt/lists/* \ - && apt install 
chromium-chromedriver -y + && apt-get update && apt-get install -y chromium-driver && rm -rf /var/lib/apt/lists/* -# Install spacy library using pip -RUN pip install spacy +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt +RUN pip install spacy torch torchvision torchaudio # Set display port and dbus env to avoid hanging ENV DISPLAY=:99 @@ -41,4 +40,4 @@ EXPOSE 80 ENV PYTHONUNBUFFERED 1 # Run uvicorn -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"] \ No newline at end of file +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"] diff --git a/Dockerfile_mac b/Dockerfile_mac new file mode 100644 index 00000000..0d08e17e --- /dev/null +++ b/Dockerfile_mac @@ -0,0 +1,44 @@ +# Use an official Python runtime as a parent image +FROM python:3.10-slim + +# Set the working directory in the container +WORKDIR /usr/src/app + +# Copy the current directory contents into the container at /usr/src/app +COPY . . + +# Install any needed packages specified in requirements.txt +RUN pip install --no-cache-dir -r requirements.txt + +# Install dependencies for Chrome and ChromeDriver +RUN apt-get update && apt-get install -y --no-install-recommends \ + wget \ + xvfb \ + unzip \ + curl \ + gnupg2 \ + ca-certificates \ + apt-transport-https \ + software-properties-common \ + && wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ + && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ + && apt-get update \ + && apt-get install -y google-chrome-stable \ + && rm -rf /var/lib/apt/lists/* \ + && apt-get update && apt-get install -y chromium-driver && rm -rf /var/lib/apt/lists/* + +# Install spacy library using pip +RUN pip install spacy + +# Set display port and dbus env to avoid hanging +ENV DISPLAY=:99 +ENV DBUS_SESSION_BUS_ADDRESS=/dev/null + +# Make port 80 available to the world outside this container +EXPOSE 80 + +# Define environment variable +ENV PYTHONUNBUFFERED 1 + +# Run 
uvicorn +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"] \ No newline at end of file diff --git a/README.md b/README.md index de956f5c..6871adf6 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,9 @@ To show the simplicity take a look at the first example: from crawl4ai import WebCrawler # Create the WebCrawler instance -crawler = WebCrawler() +crawler = WebCrawler() + + # Run the crawler with keyword filtering and CSS selector result = crawler.run(url="https://www.nbcnews.com/business") @@ -156,11 +158,11 @@ pip install -e .[all] docker run -d -p 8000:80 crawl4ai ``` -For more information about how to run Crawl4AI as a local server, please refer to the [GitHub repository](https://github.com/unclecode/crawl4ai). + ## Using the Local server ot REST API 🌐 -You can also use Crawl4AI through the REST API. This method allows you to send HTTP requests to the Crawl4AI server and receive structured data in response. The base URL for the API is `https://crawl4ai.com/crawl` [COMING SOON]. If you run the local server, you can use `http://localhost:8000/crawl`. (Port is dependent on your docker configuration) +You can also use Crawl4AI through the REST API. This method allows you to send HTTP requests to the Crawl4AI server and receive structured data in response. The base URL for the API is `https://crawl4ai.com/crawl` [Available now; it currently runs on a CPU server and will of course be faster on GPU]. If you run the local server, you can use `http://localhost:8000/crawl`. 
(Port is dependent on your docker configuration) ### Example Usage diff --git a/main.py b/main.py index 45e37515..20ee0acb 100644 --- a/main.py +++ b/main.py @@ -2,6 +2,8 @@ import os import importlib import asyncio from functools import lru_cache +import logging +logging.basicConfig(level=logging.DEBUG) from fastapi import FastAPI, HTTPException, Request from fastapi.responses import HTMLResponse, JSONResponse @@ -77,7 +79,7 @@ async def get_total_url_count(): # Add endpoit to clear db @app.get("/clear-db") async def clear_database(): - clear_db() + # clear_db() return JSONResponse(content={"message": "Database cleared."}) def import_strategy(module_name: str, class_name: str, *args, **kwargs): @@ -86,12 +88,15 @@ def import_strategy(module_name: str, class_name: str, *args, **kwargs): strategy_class = getattr(module, class_name) return strategy_class(*args, **kwargs) except ImportError: + print("ImportError: Module not found.") raise HTTPException(status_code=400, detail=f"Module {module_name} not found.") except AttributeError: + print("AttributeError: Class not found.") raise HTTPException(status_code=400, detail=f"Class {class_name} not found in {module_name}.") @app.post("/crawl") async def crawl_urls(crawl_request: CrawlRequest, request: Request): + logging.debug(f"[LOG] Crawl request for URL: {crawl_request.urls}") global current_requests async with lock: if current_requests >= MAX_CONCURRENT_REQUESTS: @@ -99,10 +104,12 @@ async def crawl_urls(crawl_request: CrawlRequest, request: Request): current_requests += 1 try: + logging.debug("[LOG] Loading extraction and chunking strategies...") extraction_strategy = import_strategy("crawl4ai.extraction_strategy", crawl_request.extraction_strategy, **crawl_request.extraction_strategy_args) chunking_strategy = import_strategy("crawl4ai.chunking_strategy", crawl_request.chunking_strategy, **crawl_request.chunking_strategy_args) # Use ThreadPoolExecutor to run the synchronous WebCrawler in async manner + 
logging.debug("[LOG] Running the WebCrawler...") with ThreadPoolExecutor() as executor: loop = asyncio.get_event_loop() futures = [