feat: Bump version to v0.2.73 and update documentation

This commit updates the version number to v0.2.73 and makes corresponding changes in the README.md and Dockerfile.

The Dockerfile now installs the default mode, which resolves many installation issues.

Additionally, the installation instructions are updated to cover the different installation modes, and setup.py no longer has a hard dependency on spaCy (the custom install command that pip-installed spaCy without its dependencies has been removed).

The change log is also updated to reflect these changes.

Added support for websites that need a with-head (non-headless) browser: pages that come back empty in headless mode are now retried in a small visible browser window.
Author: unclecode
Date: 2024-07-03 15:19:22 +08:00
Parent: 88d8cd8650
Commit: 9926eb9f95

8 changed files with 34 additions and 25 deletions

README.md

```diff
@@ -1,4 +1,4 @@
-# Crawl4AI v0.2.72 🕷️🤖
+# Crawl4AI v0.2.73 🕷️🤖
 [![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers)
 [![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members)
```

crawl4ai/crawler_strategy.py

```diff
@@ -9,6 +9,7 @@ from selenium.common.exceptions import InvalidArgumentException, WebDriverExcept
 from selenium.webdriver.chrome.service import Service as ChromeService
 from webdriver_manager.chrome import ChromeDriverManager
+from .config import *
 import logging, time
 import base64
 from PIL import Image, ImageDraw, ImageFont
@@ -181,7 +182,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
         initial_length = len(self.driver.page_source)
         for ix in range(max_checks):
-            print(f"Checking page load: {ix}")
+            # print(f"Checking page load: {ix}")
             time.sleep(check_interval)
             current_length = len(self.driver.page_source)
@@ -190,7 +191,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
         return self.driver.page_source
 
-    def crawl(self, url: str) -> str:
+    def crawl(self, url: str, **kwargs) -> str:
         # Create md5 hash of the URL
         import hashlib
         url_hash = hashlib.md5(url.encode()).hexdigest()
@@ -213,15 +214,17 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
             WebDriverWait(self.driver, 10).until(
                 EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
             )
+            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
             html = self._ensure_page_load() # self.driver.page_source
             can_not_be_done_headless = False # Look at my creativity for naming variables
             # TODO: Very ugly way for now but it works
-            if html == "<html><head></head><body></body></html>":
+            if not kwargs.get('bypass_headless', False) and html == "<html><head></head><body></body></html>":
+                print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
                 can_not_be_done_headless = True
                 options = Options()
                 options.headless = False
                 # set window size very small
-                options.add_argument("--window-size=10,10")
+                options.add_argument("--window-size=5,5")
                 driver = webdriver.Chrome(service=self.service, options=options)
                 driver.get(url)
                 html = driver.page_source
```
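The fallback the hunk above implements can be read as a standalone sketch; the `fetch` helper and `EMPTY_PAGE` constant below are illustrative names, not from the repo. If headless Chrome hands back an empty document shell, the page is fetched once more in a tiny with-head window, unless the caller opted out with `bypass_headless`:

```python
# Minimal sketch of the with-head fallback, using plain Selenium;
# names here are illustrative, not the library's own.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

EMPTY_PAGE = "<html><head></head><body></body></html>"

def fetch(url: str, bypass_headless: bool = False) -> str:
    options = Options()
    options.add_argument("--headless")
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(url)
        html = driver.page_source
    finally:
        driver.quit()

    if not bypass_headless and html == EMPTY_PAGE:
        # Some sites detect headless Chrome and serve an empty shell;
        # retry with a real (with-head) window, kept deliberately tiny.
        retry_options = Options()
        retry_options.add_argument("--window-size=5,5")
        driver = webdriver.Chrome(options=retry_options)
        try:
            driver.get(url)
            html = driver.page_source
        finally:
            driver.quit()
    return html
```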

crawl4ai/extraction_strategy.py

```diff
@@ -101,7 +101,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
             prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION
 
         if self.extract_type == "schema":
-            variable_values["SCHEMA"] = json.dumps(self.schema)
+            variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)
             prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION
 
         for variable in variable_values:
@@ -109,7 +109,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
                 "{" + variable + "}", variable_values[variable]
             )
 
-        response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token)
+        response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token) # , json_response=self.extract_type == "schema")
         try:
             blocks = extract_xml_data(["blocks"], response.choices[0].message.content)['blocks']
             blocks = json.loads(blocks)
@@ -196,6 +196,10 @@ class LLMExtractionStrategy(ExtractionStrategy):
                 time.sleep(0.5) # 500 ms delay between each processing
         else:
             # Parallel processing using ThreadPoolExecutor
+            # extract_func = partial(self.extract, url)
+            # for ix, section in enumerate(merged_sections):
+            #     extracted_content.append(extract_func(ix, section))
             with ThreadPoolExecutor(max_workers=4) as executor:
                 extract_func = partial(self.extract, url)
                 futures = [executor.submit(extract_func, ix, section) for ix, section in enumerate(merged_sections)]
```
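The parallel branch is a standard `functools.partial` plus `ThreadPoolExecutor` fan-out. A self-contained sketch of the same pattern, with a stub `extract` standing in for the class method:

```python
# Bind the fixed url argument, then fan (index, section) pairs out over
# a small thread pool; collecting futures in order preserves ordering.
from concurrent.futures import ThreadPoolExecutor
from functools import partial

def extract(url: str, ix: int, section: str) -> dict:
    return {"index": ix, "url": url, "content": section}

merged_sections = ["first block of text", "second block of text"]
extract_func = partial(extract, "https://example.com")

with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(extract_func, ix, section)
               for ix, section in enumerate(merged_sections)]
    extracted_content = [f.result() for f in futures]

print(extracted_content)
```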

crawl4ai/prompts.py

```diff
@@ -186,7 +186,7 @@ The user has made the following request for what information to extract from the
 Please carefully read the URL content and the user's request. If the user provided a desired JSON schema in the <schema_block> above, extract the requested information from the URL content according to that schema. If no schema was provided, infer an appropriate JSON schema based on the user's request that will best capture the key information they are looking for.
 
 Extraction instructions:
-Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in <blocks> tags.
+Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in <blocks>...</blocks> XML tags.
 
 Quality Reflection:
 Before outputting your final answer, double check that the JSON you are returning is complete, containing all the information requested by the user, and is valid JSON that could be parsed by json.loads() with no errors or omissions. The outputted JSON objects should fully match the schema, either provided or inferred.
@@ -194,5 +194,11 @@ Before outputting your final answer, double check that the JSON you are returnin
 Quality Score:
 After reflecting, score the quality and completeness of the JSON data you are about to return on a scale of 1 to 5. Write the score inside <score> tags.
 
+Avoid Common Mistakes:
+- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
+- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
+- Do not miss the closing </blocks> tag at the end of the JSON output.
+- Do not generate Python code showing how to do the task; your task is to extract the information and return it in JSON format.
+
 Result
-Output the final list of JSON objects, wrapped in <blocks> tags."""
+Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
```
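The stricter wording exists because the reply is parsed mechanically: as the extraction-strategy hunk above shows, the library pulls the tag body out with `extract_xml_data` and hands it to `json.loads`, so a stray `//` comment or a missing `</blocks>` breaks the pipeline. A simplified stand-in for that consumption step (not the library's implementation):

```python
# Pull the <blocks>...</blocks> body out of a model reply and parse it
# as JSON; any embedded comment or unclosed tag makes this raise.
import json
import re

def extract_blocks(reply: str) -> list:
    match = re.search(r"<blocks>(.*?)</blocks>", reply, re.DOTALL)
    if match is None:
        raise ValueError("missing <blocks>...</blocks> wrapper")
    return json.loads(match.group(1))

reply = '<blocks>[{"title": "Hello", "summary": "World"}]</blocks>'
print(extract_blocks(reply))  # [{'title': 'Hello', 'summary': 'World'}]
```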

crawl4ai/utils.py

```diff
@@ -419,7 +419,6 @@ def get_content_of_website(url, html, word_count_threshold = MIN_WORD_THRESHOLD,
         print('Error processing HTML content:', str(e))
         raise InvalidCSSSelectorError(f"Invalid CSS selector: {css_selector}") from e
 
-
 def get_content_of_website_optimized(url: str, html: str, word_count_threshold: int = MIN_WORD_THRESHOLD, css_selector: str = None, **kwargs) -> Dict[str, Any]:
     if not html:
         return None
@@ -544,7 +543,6 @@ def get_content_of_website_optimized(url: str, html: str, word_count_threshold:
         'metadata': meta
     }
 
-
 def extract_metadata(html, soup = None):
     metadata = {}
@@ -603,12 +601,16 @@ def extract_xml_data(tags, string):
     return data
 
 # Function to perform the completion with exponential backoff
-def perform_completion_with_backoff(provider, prompt_with_variables, api_token):
+def perform_completion_with_backoff(provider, prompt_with_variables, api_token, json_response = False):
     from litellm import completion
     from litellm.exceptions import RateLimitError
     max_attempts = 3
     base_delay = 2  # Base delay in seconds, you can adjust this based on your needs
+    extra_args = {}
+    if json_response:
+        extra_args["response_format"] = { "type": "json_object" }
+
     for attempt in range(max_attempts):
         try:
             response =completion(
@@ -617,7 +619,8 @@ def perform_completion_with_backoff(provider, prompt_with_variables, api_token):
                     {"role": "user", "content": prompt_with_variables}
                 ],
                 temperature=0.01,
-                api_key=api_token
+                api_key=api_token,
+                **extra_args
             )
             return response  # Return the successful response
         except RateLimitError as e:
```
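Assembled from the fragments above, the new `json_response` switch amounts to the sketch below; the `model=provider` line is assumed from litellm's conventions since it is not visible in this hunk, and the retry/backoff loop is omitted:

```python
# Minimal sketch of the json_response switch: when set, ask OpenAI-style
# providers for strict JSON output via litellm's response_format option.
from litellm import completion

def complete(provider: str, prompt: str, api_token: str,
             json_response: bool = False):
    extra_args = {}
    if json_response:
        # Honored by providers with a JSON mode; others may reject it.
        extra_args["response_format"] = {"type": "json_object"}
    return completion(
        model=provider,  # assumed kwarg; not shown in the hunk above
        messages=[{"role": "user", "content": prompt}],
        temperature=0.01,
        api_key=api_token,
        **extra_args,
    )
```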

crawl4ai/web_crawler.py

```diff
@@ -11,6 +11,8 @@ from .crawler_strategy import *
 from typing import List
 from concurrent.futures import ThreadPoolExecutor
 from .config import *
+import warnings
+warnings.filterwarnings("ignore", message='Field "model_name" has conflict with protected namespace "model_".')
 
 class WebCrawler:
@@ -164,7 +166,7 @@ class WebCrawler:
         if user_agent:
             self.crawler_strategy.update_user_agent(user_agent)
         t1 = time.time()
-        html = self.crawler_strategy.crawl(url)
+        html = self.crawler_strategy.crawl(url, **kwargs)
         t2 = time.time()
         if verbose:
             print(f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1} seconds")
```
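End to end, the forwarded `**kwargs` make the headless fallback user-controllable. A hypothetical usage sketch, assuming the v0.2.x `WebCrawler` interface:

```python
# bypass_headless travels from run(...) into the strategy's crawl(...)
# and disables the with-head retry; API shape assumed from v0.2.x usage.
from crawl4ai import WebCrawler

crawler = WebCrawler()
crawler.warmup()

# Default: retry in a small with-head window if headless comes back empty.
result = crawler.run(url="https://example.com")

# Opt out of the retry, e.g. on a display-less server.
result = crawler.run(url="https://example.com", bypass_headless=True)
```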

Documentation index page

```diff
@@ -1,4 +1,4 @@
-# Crawl4AI v0.2.72
+# Crawl4AI v0.2.73
 Welcome to the official documentation for Crawl4AI! 🕷️🤖 Crawl4AI is an open-source Python library designed to simplify web crawling and extract useful information from web pages. This documentation will guide you through the features, usage, and customization of Crawl4AI.
```

setup.py

```diff
@@ -18,15 +18,9 @@ default_requirements = [req for req in requirements if not req.startswith(("torc
 torch_requirements = [req for req in requirements if req.startswith(("torch", "nltk", "spacy", "scikit-learn", "numpy"))]
 transformer_requirements = [req for req in requirements if req.startswith(("transformers", "tokenizers", "onnxruntime"))]
 
-class CustomInstallCommand(install):
-    """Customized setuptools install command to install spacy without dependencies."""
-    def run(self):
-        install.run(self)
-        subprocess.check_call([os.sys.executable, '-m', 'pip', 'install', 'spacy', '--no-deps'])
-
 setup(
     name="Crawl4AI",
-    version="0.2.72",
+    version="0.2.73",
     description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
@@ -41,9 +35,6 @@ setup(
         "transformer": transformer_requirements,
         "all": requirements,
     },
-    cmdclass={
-        'install': CustomInstallCommand,
-    },
     entry_points={
         'console_scripts': [
             'crawl4ai-download-models=crawl4ai.model_loader:main',
```