From 9926eb9f954c5760c53093eebce75f56ef12ed15 Mon Sep 17 00:00:00 2001 From: unclecode Date: Wed, 3 Jul 2024 15:19:22 +0800 Subject: [PATCH] feat: Bump version to v0.2.73 and update documentation This commit updates the version number to v0.2.73 and makes corresponding changes in the README.md and Dockerfile. Docker file install the default mode, this resolve many of installation issues. Additionally, the installation instructions are updated to include support for different modes. Setup.py doesn't have anymore dependancy on Spacy. The change log is also updated to reflect these changes. Supporting websites need with-head browser. --- README.md | 2 +- crawl4ai/crawler_strategy.py | 11 +++++++---- crawl4ai/extraction_strategy.py | 8 ++++++-- crawl4ai/prompts.py | 10 ++++++++-- crawl4ai/utils.py | 11 +++++++---- crawl4ai/web_crawler.py | 4 +++- docs/md/index.md | 2 +- setup.py | 11 +---------- 8 files changed, 34 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index ed3c67f0..cf4e4760 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.72 🕷️🤖 +# Crawl4AI v0.2.73 🕷️🤖 [![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers) [![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members) diff --git a/crawl4ai/crawler_strategy.py b/crawl4ai/crawler_strategy.py index 3d40c528..21de883e 100644 --- a/crawl4ai/crawler_strategy.py +++ b/crawl4ai/crawler_strategy.py @@ -9,6 +9,7 @@ from selenium.common.exceptions import InvalidArgumentException, WebDriverExcept from selenium.webdriver.chrome.service import Service as ChromeService from webdriver_manager.chrome import ChromeDriverManager +from .config import * import logging, time import base64 from PIL import Image, ImageDraw, ImageFont @@ -181,7 +182,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): initial_length = 
len(self.driver.page_source) for ix in range(max_checks): - print(f"Checking page load: {ix}") + # print(f"Checking page load: {ix}") time.sleep(check_interval) current_length = len(self.driver.page_source) @@ -190,7 +191,7 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): return self.driver.page_source - def crawl(self, url: str) -> str: + def crawl(self, url: str, **kwargs) -> str: # Create md5 hash of the URL import hashlib url_hash = hashlib.md5(url.encode()).hexdigest() @@ -213,15 +214,17 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy): WebDriverWait(self.driver, 10).until( EC.presence_of_all_elements_located((By.TAG_NAME, "body")) ) + self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") html = self._ensure_page_load() # self.driver.page_source can_not_be_done_headless = False # Look at my creativity for naming variables # TODO: Very ugly way for now but it works - if html == "": + if not kwargs.get('bypass_headless', False) and html == "": + print("[LOG] 🙌 Page could not be loaded in headless mode. 
Trying non-headless mode...") can_not_be_done_headless = True options = Options() options.headless = False # set window size very small - options.add_argument("--window-size=10,10") + options.add_argument("--window-size=5,5") driver = webdriver.Chrome(service=self.service, options=options) driver.get(url) html = driver.page_source diff --git a/crawl4ai/extraction_strategy.py b/crawl4ai/extraction_strategy.py index f635f60b..d4415c88 100644 --- a/crawl4ai/extraction_strategy.py +++ b/crawl4ai/extraction_strategy.py @@ -101,7 +101,7 @@ class LLMExtractionStrategy(ExtractionStrategy): prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION if self.extract_type == "schema": - variable_values["SCHEMA"] = json.dumps(self.schema) + variable_values["SCHEMA"] = json.dumps(self.schema, indent=2) prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION for variable in variable_values: @@ -109,7 +109,7 @@ class LLMExtractionStrategy(ExtractionStrategy): "{" + variable + "}", variable_values[variable] ) - response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token) + response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token) # , json_response=self.extract_type == "schema") try: blocks = extract_xml_data(["blocks"], response.choices[0].message.content)['blocks'] blocks = json.loads(blocks) @@ -196,6 +196,10 @@ class LLMExtractionStrategy(ExtractionStrategy): time.sleep(0.5) # 500 ms delay between each processing else: # Parallel processing using ThreadPoolExecutor + # extract_func = partial(self.extract, url) + # for ix, section in enumerate(merged_sections): + # extracted_content.append(extract_func(ix, section)) + with ThreadPoolExecutor(max_workers=4) as executor: extract_func = partial(self.extract, url) futures = [executor.submit(extract_func, ix, section) for ix, section in enumerate(merged_sections)] diff --git a/crawl4ai/prompts.py b/crawl4ai/prompts.py index 39de7e3b..323c4774 100644 
--- a/crawl4ai/prompts.py +++ b/crawl4ai/prompts.py @@ -186,7 +186,7 @@ The user has made the following request for what information to extract from the Please carefully read the URL content and the user's request. If the user provided a desired JSON schema in the above, extract the requested information from the URL content according to that schema. If no schema was provided, infer an appropriate JSON schema based on the user's request that will best capture the key information they are looking for. Extraction instructions: -Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in tags. +Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in ... XML tags. Quality Reflection: Before outputting your final answer, double check that the JSON you are returning is complete, containing all the information requested by the user, and is valid JSON that could be parsed by json.loads() with no errors or omissions. The outputted JSON objects should fully match the schema, either provided or inferred. @@ -194,5 +194,11 @@ Before outputting your final answer, double check that the JSON you are returnin Quality Score: After reflecting, score the quality and completeness of the JSON data you are about to return on a scale of 1 to 5. Write the score inside tags. +Avoid Common Mistakes: +- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors. +- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places. +- Do not miss closing tag at the end of the JSON output. 
+- Do not generate Python code showing how to do the task; it is your task to extract the information and return it in JSON format.
Return the successful response except RateLimitError as e: diff --git a/crawl4ai/web_crawler.py b/crawl4ai/web_crawler.py index ef85066e..954e9b84 100644 --- a/crawl4ai/web_crawler.py +++ b/crawl4ai/web_crawler.py @@ -11,6 +11,8 @@ from .crawler_strategy import * from typing import List from concurrent.futures import ThreadPoolExecutor from .config import * +import warnings +warnings.filterwarnings("ignore", message='Field "model_name" has conflict with protected namespace "model_".') class WebCrawler: @@ -164,7 +166,7 @@ class WebCrawler: if user_agent: self.crawler_strategy.update_user_agent(user_agent) t1 = time.time() - html = self.crawler_strategy.crawl(url) + html = self.crawler_strategy.crawl(url, **kwargs) t2 = time.time() if verbose: print(f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1} seconds") diff --git a/docs/md/index.md b/docs/md/index.md index 21fcdeb0..b08fdd12 100644 --- a/docs/md/index.md +++ b/docs/md/index.md @@ -1,4 +1,4 @@ -# Crawl4AI v0.2.72 +# Crawl4AI v0.2.73 Welcome to the official documentation for Crawl4AI! 🕷️🤖 Crawl4AI is an open-source Python library designed to simplify web crawling and extract useful information from web pages. This documentation will guide you through the features, usage, and customization of Crawl4AI. 
diff --git a/setup.py b/setup.py index cfbeaddf..ddd69d12 100644 --- a/setup.py +++ b/setup.py @@ -18,15 +18,9 @@ default_requirements = [req for req in requirements if not req.startswith(("torc torch_requirements = [req for req in requirements if req.startswith(("torch", "nltk", "spacy", "scikit-learn", "numpy"))] transformer_requirements = [req for req in requirements if req.startswith(("transformers", "tokenizers", "onnxruntime"))] -class CustomInstallCommand(install): - """Customized setuptools install command to install spacy without dependencies.""" - def run(self): - install.run(self) - subprocess.check_call([os.sys.executable, '-m', 'pip', 'install', 'spacy', '--no-deps']) - setup( name="Crawl4AI", - version="0.2.72", + version="0.2.73", description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper", long_description=open("README.md").read(), long_description_content_type="text/markdown", @@ -41,9 +35,6 @@ setup( "transformer": transformer_requirements, "all": requirements, }, - cmdclass={ - 'install': CustomInstallCommand, - }, entry_points={ 'console_scripts': [ 'crawl4ai-download-models=crawl4ai.model_loader:main',