Create series of quickstart files.

This commit is contained in:
unclecode
2024-09-04 15:33:24 +08:00
parent 5c15837677
commit eb131bebdf
4 changed files with 977 additions and 441 deletions

View File

@@ -12,7 +12,7 @@ Crawl4AI simplifies asynchronous web crawling and data extraction, making it acc
 ## Try it Now!
-✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1sJPAmeLj5PMrg2VgOwMJ2ubGIcK0cJeX?usp=sharing)
+✨ Play around with this [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1REChY6fXQf-EaVYLv0eHEWvzlYxGm0pd?usp=sharing)
 ✨ Visit our [Documentation Website](https://crawl4ai.com/mkdocs/)

View File

@@ -2,7 +2,9 @@
"cells": [ "cells": [
{ {
"cell_type": "markdown", "cell_type": "markdown",
"metadata": {}, "metadata": {
"id": "6yLvrXn7yZQI"
},
"source": [ "source": [
"# Crawl4AI: Advanced Web Crawling and Data Extraction\n", "# Crawl4AI: Advanced Web Crawling and Data Extraction\n",
"\n", "\n",
@@ -17,7 +19,9 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "KIn_9nxFyZQK"
+  },
   "source": [
    "## Installation\n",
    "\n",
@@ -26,26 +30,44 @@
  },
  {
   "cell_type": "code",
+  "source": [
+   "!sudo apt-get update && sudo apt-get install -y libwoff1 libopus0 libwebp6 libwebpdemux2 libenchant1c2a libgudev-1.0-0 libsecret-1-0 libhyphen0 libgdk-pixbuf2.0-0 libegl1 libnotify4 libxslt1.1 libevent-2.1-7 libgles2 libvpx6 libxcomposite1 libatk1.0-0 libatk-bridge2.0-0 libepoxy0 libgtk-3-0 libharfbuzz-icu0"
+  ],
+  "metadata": {
+   "id": "mSnaxLf3zMog"
+  },
   "execution_count": null,
-  "metadata": {},
+  "outputs": []
+ },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {
+   "id": "xlXqaRtayZQK"
+  },
   "outputs": [],
   "source": [
-   "!pip install \"crawl4ai @ git+https://github.com/unclecode/crawl4ai.git\"\n",
+   "# !pip install \"crawl4ai @ git+https://github.com/unclecode/crawl4ai.git\"\n",
+   "!pip install \"crawl4ai @ git+https://github.com/unclecode/crawl4ai.git@staging\"\n",
    "!pip install nest-asyncio\n",
    "!playwright install"
   ]
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "qKCE7TI7yZQL"
+  },
   "source": [
    "Now, let's import the necessary libraries:"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 1,
-  "metadata": {},
+  "metadata": {
+   "id": "I67tr7aAyZQL"
+  },
   "outputs": [],
   "source": [
    "import asyncio\n",
@@ -61,7 +83,9 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "h7yR_Rt_yZQM"
+  },
   "source": [
    "## Basic Usage\n",
    "\n",
@@ -70,21 +94,49 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
+  "execution_count": 4,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "yBh6hf4WyZQM",
+   "outputId": "0f83af5c-abba-4175-ed95-70b7512e6bcc"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
+     "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
+     "[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.18 seconds\n",
+     "[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.18 seconds.\n",
+     "18219\n"
+    ]
+   }
+  ],
   "source": [
    "async def simple_crawl():\n",
    "    async with AsyncWebCrawler(verbose=True) as crawler:\n",
    "        result = await crawler.arun(url=\"https://www.nbcnews.com/business\")\n",
-   "        print(result.markdown[:500])  # Print first 500 characters\n",
+   "        print(len(result.markdown))\n",
    "\n",
    "await simple_crawl()"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "source": [
+   "💡 By default, **Crawl4AI** caches the result of every URL, so the next time you call it, you'll get an instant result. But if you want to bypass the cache, just set `bypass_cache=True`."
+  ],
+  "metadata": {
+   "id": "9rtkgHI28uI4"
+  }
+ },
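The caching behaviour described in that cell is easy to verify. A minimal sketch, using only the calls that already appear in this notebook (`cache_demo` is a hypothetical helper name; the second call should return near-instantly from the cache):

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def cache_demo():
    async with AsyncWebCrawler(verbose=True) as crawler:
        # First call fetches the page and populates the local cache.
        first = await crawler.arun(url="https://www.nbcnews.com/business")
        # Second call is served from the cache and returns almost instantly.
        cached = await crawler.arun(url="https://www.nbcnews.com/business")
        # bypass_cache=True forces a fresh fetch even though a cached copy exists.
        fresh = await crawler.arun(url="https://www.nbcnews.com/business", bypass_cache=True)
        print(len(first.markdown), len(cached.markdown), len(fresh.markdown))

# In a notebook cell: await cache_demo()
# In a plain script:  asyncio.run(cache_demo())
```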
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "MzZ0zlJ9yZQM"
+  },
   "source": [
    "## Advanced Features\n",
    "\n",
@@ -93,9 +145,31 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
+  "execution_count": 9,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "gHStF86xyZQM",
+   "outputId": "34d0fb6d-4dec-4677-f76e-85a1f082829b"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
+     "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
+     "[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
+     "[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 9.78 seconds\n",
+     "[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.44 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
+     "[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.45 seconds.\n",
+     "34241\n"
+    ]
+   }
+  ],
   "source": [
    "async def js_and_css():\n",
    "    async with AsyncWebCrawler(verbose=True) as crawler:\n",
@@ -103,17 +177,19 @@
" result = await crawler.arun(\n", " result = await crawler.arun(\n",
" url=\"https://www.nbcnews.com/business\",\n", " url=\"https://www.nbcnews.com/business\",\n",
" js_code=js_code,\n", " js_code=js_code,\n",
" css_selector=\"article.tease-card\",\n", " # css_selector=\"YOUR_CSS_SELECTOR_HERE\",\n",
" bypass_cache=True\n", " bypass_cache=True\n",
" )\n", " )\n",
" print(result.extracted_content[:500]) # Print first 500 characters\n", " print(len(result.markdown))\n",
"\n", "\n",
"await js_and_css()" "await js_and_css()"
] ]
}, },
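If you do want to narrow the output to specific elements, `css_selector` takes a plain CSS selector. A minimal sketch reusing the `article.tease-card` selector this cell used before the change (`crawl_teaser_cards` is a hypothetical name):

```python
from crawl4ai import AsyncWebCrawler

async def crawl_teaser_cards():
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            css_selector="article.tease-card",  # keep only the matching elements
            bypass_cache=True,
        )
        print(result.markdown[:500])

# await crawl_teaser_cards()
```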
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "cqE_W4coyZQM"
+  },
   "source": [
    "### Using a Proxy\n",
    "\n",
@@ -123,7 +199,9 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "metadata": {},
+  "metadata": {
+   "id": "QjAyiAGqyZQM"
+  },
   "outputs": [],
   "source": [
    "async def use_proxy():\n",
@@ -140,7 +218,9 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "XTZ88lbayZQN"
+  },
   "source": [
    "### Extracting Structured Data with OpenAI\n",
    "\n",
@@ -149,11 +229,45 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
+  "execution_count": 14,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "fIOlDayYyZQN",
+   "outputId": "cb8359cc-dee0-4762-9698-5dfdcee055b8"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
+     "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
+     "[LOG] 🕸️ Crawling https://openai.com/api/pricing/ using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://openai.com/api/pricing/ successfully!\n",
+     "[LOG] 🚀 Crawling done for https://openai.com/api/pricing/, success: True, time taken: 3.77 seconds\n",
+     "[LOG] 🚀 Content extracted for https://openai.com/api/pricing/, success: True, time taken: 0.21 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://openai.com/api/pricing/, Strategy: AsyncWebCrawler\n",
+     "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 0\n",
+     "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 1\n",
+     "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 2\n",
+     "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 3\n",
+     "[LOG] Extracted 4 blocks from URL: https://openai.com/api/pricing/ block index: 3\n",
+     "[LOG] Call LLM for https://openai.com/api/pricing/ - block index: 4\n",
+     "[LOG] Extracted 5 blocks from URL: https://openai.com/api/pricing/ block index: 0\n",
+     "[LOG] Extracted 1 blocks from URL: https://openai.com/api/pricing/ block index: 4\n",
+     "[LOG] Extracted 8 blocks from URL: https://openai.com/api/pricing/ block index: 1\n",
+     "[LOG] Extracted 12 blocks from URL: https://openai.com/api/pricing/ block index: 2\n",
+     "[LOG] 🚀 Extraction done for https://openai.com/api/pricing/, time taken: 8.55 seconds.\n",
+     "5029\n"
+    ]
+   }
+  ],
   "source": [
    "import os\n",
+   "from google.colab import userdata\n",
+   "os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
    "\n",
    "class OpenAIModelFee(BaseModel):\n",
    "    model_name: str = Field(..., description=\"Name of the OpenAI model.\")\n",
@@ -166,33 +280,85 @@
" url='https://openai.com/api/pricing/',\n", " url='https://openai.com/api/pricing/',\n",
" word_count_threshold=1,\n", " word_count_threshold=1,\n",
" extraction_strategy=LLMExtractionStrategy(\n", " extraction_strategy=LLMExtractionStrategy(\n",
" provider=\"openai/gpt-4o\", api_token=os.getenv('OPENAI_API_KEY'), \n", " provider=\"openai/gpt-4o\", api_token=os.getenv('OPENAI_API_KEY'),\n",
" schema=OpenAIModelFee.schema(),\n", " schema=OpenAIModelFee.schema(),\n",
" extraction_type=\"schema\",\n", " extraction_type=\"schema\",\n",
" instruction=\"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens. \n", " instruction=\"\"\"From the crawled content, extract all mentioned model names along with their fees for input and output tokens.\n",
" Do not miss any models in the entire content. One extracted model JSON format should look like this: \n", " Do not miss any models in the entire content. One extracted model JSON format should look like this:\n",
" {\"model_name\": \"GPT-4\", \"input_fee\": \"US$10.00 / 1M tokens\", \"output_fee\": \"US$30.00 / 1M tokens\"}.\"\"\"\n", " {\"model_name\": \"GPT-4\", \"input_fee\": \"US$10.00 / 1M tokens\", \"output_fee\": \"US$30.00 / 1M tokens\"}.\"\"\"\n",
" ), \n", " ),\n",
" bypass_cache=True,\n", " bypass_cache=True,\n",
" )\n", " )\n",
" print(result.extracted_content)\n", " print(len(result.extracted_content))\n",
"\n", "\n",
"# Uncomment the following line to run the OpenAI extraction example\n", "# Uncomment the following line to run the OpenAI extraction example\n",
"# await extract_openai_fees()" "await extract_openai_fees()"
] ]
}, },
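The `schema=OpenAIModelFee.schema()` argument above is simply the JSON schema Pydantic generates from the model; printing it shows the field names and descriptions the LLM is asked to fill. A small sketch, assuming the Pydantic v1-style `.schema()` used in this commit:

```python
import json
from pydantic import BaseModel, Field

class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")

# This dict is what LLMExtractionStrategy receives as its extraction target.
print(json.dumps(OpenAIModelFee.schema(), indent=2))
```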
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "BypA5YxEyZQN"
+  },
   "source": [
    "### Advanced Multi-Page Crawling with JavaScript Execution"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "source": [
+   "## Advanced Multi-Page Crawling with JavaScript Execution\n",
+   "\n",
+   "This example demonstrates Crawl4AI's ability to handle complex crawling scenarios, specifically extracting commits from multiple pages of a GitHub repository. The challenge here is that clicking the \"Next\" button doesn't load a new page, but instead uses asynchronous JavaScript to update the content. This is a common hurdle in modern web crawling.\n",
+   "\n",
+   "To overcome this, we use Crawl4AI's custom JavaScript execution to simulate clicking the \"Next\" button, and implement a custom hook to detect when new data has loaded. Our strategy involves comparing the first commit's text before and after \"clicking\" Next, waiting until it changes to confirm new data has rendered. This showcases Crawl4AI's flexibility in handling dynamic content and its ability to implement custom logic for even the most challenging crawling tasks."
+  ],
+  "metadata": {
+   "id": "tfkcVQ0b7mw-"
+  }
+ },
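The "wait until new data has rendered" hook described above boils down to a small polling loop. A sketch of just that piece, distilled from the full cell below (`wait_for_new_first_commit` is a hypothetical name; the selector and the Playwright `page` calls are the ones the example itself uses):

```python
import asyncio
import re

async def wait_for_new_first_commit(page, previous_first_commit):
    # Poll the first commit's text until it differs from what was shown
    # before "Next" was clicked, i.e. until the async update has rendered.
    while True:
        await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')
        handle = await page.query_selector('li.Box-sc-g0xbh4-0 h4')
        text = await handle.evaluate('(element) => element.textContent')
        text = re.sub(r'\s+', '', text)
        if text and text != previous_first_commit:
            return text
        await asyncio.sleep(0.5)
```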
  {
   "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
+  "execution_count": 11,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "qUBKGpn3yZQN",
+   "outputId": "3e555b6a-ed33-42f4-cce9-499a923fbe17"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
+     "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
+     "[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
+     "[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 5.16 seconds\n",
+     "[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.28 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
+     "[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.28 seconds.\n",
+     "Page 1: Found 35 commits\n",
+     "[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
+     "[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.78 seconds\n",
+     "[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.90 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
+     "[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.90 seconds.\n",
+     "Page 2: Found 35 commits\n",
+     "[LOG] 🕸️ Crawling https://github.com/microsoft/TypeScript/commits/main using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://github.com/microsoft/TypeScript/commits/main successfully!\n",
+     "[LOG] 🚀 Crawling done for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 2.00 seconds\n",
+     "[LOG] 🚀 Content extracted for https://github.com/microsoft/TypeScript/commits/main, success: True, time taken: 0.74 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://github.com/microsoft/TypeScript/commits/main, Strategy: AsyncWebCrawler\n",
+     "[LOG] 🚀 Extraction done for https://github.com/microsoft/TypeScript/commits/main, time taken: 0.75 seconds.\n",
+     "Page 3: Found 35 commits\n",
+     "Successfully crawled 105 commits across 3 pages\n"
+    ]
+   }
+  ],
   "source": [
    "import re\n",
    "from bs4 import BeautifulSoup\n",
@@ -200,7 +366,7 @@
"async def crawl_typescript_commits():\n", "async def crawl_typescript_commits():\n",
" first_commit = \"\"\n", " first_commit = \"\"\n",
" async def on_execution_started(page):\n", " async def on_execution_started(page):\n",
" nonlocal first_commit \n", " nonlocal first_commit\n",
" try:\n", " try:\n",
" while True:\n", " while True:\n",
" await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')\n", " await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')\n",
@@ -252,16 +418,71 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "EJRnYsp6yZQN"
+  },
   "source": [
    "### Using JsonCssExtractionStrategy for Fast Structured Output"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "source": [
+   "The JsonCssExtractionStrategy is a powerful feature of Crawl4AI that allows for precise, structured data extraction from web pages. Here's how it works:\n",
+   "\n",
+   "1. You define a schema that describes the pattern of data you're interested in extracting.\n",
+   "2. The schema includes a base selector that identifies repeating elements on the page.\n",
+   "3. Within the schema, you define fields, each with its own selector and type.\n",
+   "4. These field selectors are applied within the context of each base selector element.\n",
+   "5. The strategy supports nested structures, lists within lists, and various data types.\n",
+   "6. You can even include computed fields for more complex data manipulation.\n",
+   "\n",
+   "This approach allows for highly flexible and precise data extraction, transforming semi-structured web content into clean, structured JSON data. It's particularly useful for extracting consistent data patterns from pages like product listings, news articles, or search results.\n",
+   "\n",
+   "For more details and advanced usage, check out the full documentation on the Crawl4AI website."
+  ],
+  "metadata": {
+   "id": "1ZMqIzB_8SYp"
+  }
+ },
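As a minimal illustration of points 1-4 above, here is a hypothetical two-field schema (the selectors and target site are made up; the `name`/`baseSelector`/`fields` keys match the real example that follows):

```python
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy

schema = {
    "name": "Quote Extractor",        # a label for the schema
    "baseSelector": "div.quote",      # the repeating element on the page
    "fields": [
        # Each field selector is applied inside every div.quote match.
        {"name": "text", "selector": "span.text", "type": "text"},
        {"name": "author", "selector": "small.author", "type": "text"},
    ],
}
strategy = JsonCssExtractionStrategy(schema, verbose=True)
# Pass it to the crawler as extraction_strategy=strategy, as in the cell below.
```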
  {
   "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
+  "execution_count": 12,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "trCMR2T9yZQN",
+   "outputId": "718d36f4-cccf-40f4-8d8c-c3ba73524d16"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "[LOG] 🌤️ Warming up the AsyncWebCrawler\n",
+     "[LOG] 🌞 AsyncWebCrawler is ready to crawl\n",
+     "[LOG] 🕸️ Crawling https://www.nbcnews.com/business using AsyncPlaywrightCrawlerStrategy...\n",
+     "[LOG] ✅ Crawled https://www.nbcnews.com/business successfully!\n",
+     "[LOG] 🚀 Crawling done for https://www.nbcnews.com/business, success: True, time taken: 7.00 seconds\n",
+     "[LOG] 🚀 Content extracted for https://www.nbcnews.com/business, success: True, time taken: 0.32 seconds\n",
+     "[LOG] 🔥 Extracting semantic blocks for https://www.nbcnews.com/business, Strategy: AsyncWebCrawler\n",
+     "[LOG] 🚀 Extraction done for https://www.nbcnews.com/business, time taken: 0.48 seconds.\n",
+     "Successfully extracted 11 news teasers\n",
+     "{\n",
+     "  \"category\": \"Business News\",\n",
+     "  \"headline\": \"NBC ripped up its Olympics playbook for 2024 \\u2014 so far, the new strategy paid off\",\n",
+     "  \"summary\": \"The Olympics have long been key to NBCUniversal. Paris marked the 18th Olympic Games broadcast by NBC in the U.S.\",\n",
+     "  \"time\": \"13h ago\",\n",
+     "  \"image\": {\n",
+     "    \"src\": \"https://media-cldnry.s-nbcnews.com/image/upload/t_focal-200x100,f_auto,q_auto:best/rockcms/2024-09/240903-nbc-olympics-ch-1344-c7a486.jpg\",\n",
+     "    \"alt\": \"Mike Tirico.\"\n",
+     "  },\n",
+     "  \"link\": \"https://www.nbcnews.com/business\"\n",
+     "}\n"
+    ]
+   }
+  ],
   "source": [
    "async def extract_news_teasers():\n",
    "    schema = {\n",
@@ -326,7 +547,9 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "FnyVhJaByZQN"
+  },
   "source": [
    "## Speed Comparison\n",
    "\n",
@@ -334,19 +557,83 @@
   ]
  },
  {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
+  "cell_type": "markdown",
+  "source": [
+   "💡 **Note on Speed Comparison:**\n",
+   "\n",
+   "The speed test conducted here is running on Google Colab, where the internet speed and performance can vary and may not reflect optimal conditions. When we call Firecrawl's API, we're seeing its best performance, while Crawl4AI's performance is limited by Colab's network speed.\n",
+   "\n",
+   "For a more accurate comparison, it's recommended to run these tests on your own servers or computers with a stable and fast internet connection. Despite these limitations, Crawl4AI still demonstrates faster performance in this environment.\n",
+   "\n",
+   "If you run these tests locally, you may observe an even more significant speed advantage for Crawl4AI compared to other services."
+  ],
+  "metadata": {
+   "id": "agDD186f3wig"
+  }
+ },
+ {
+  "cell_type": "code",
+  "source": [
+   "!pip install firecrawl"
+  ],
+  "metadata": {
+   "id": "F7KwHv8G1LbY"
+  },
+  "execution_count": null,
+  "outputs": []
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 4,
+  "metadata": {
+   "colab": {
+    "base_uri": "https://localhost:8080/"
+   },
+   "id": "91813zILyZQN",
+   "outputId": "663223db-ab89-4976-b233-05ceca62b19b"
+  },
+  "outputs": [
+   {
+    "output_type": "stream",
+    "name": "stdout",
+    "text": [
+     "Firecrawl (simulated):\n",
+     "Time taken: 4.38 seconds\n",
+     "Content length: 41967 characters\n",
+     "Images found: 49\n",
+     "\n",
+     "Crawl4AI (simple crawl):\n",
+     "Time taken: 4.22 seconds\n",
+     "Content length: 18221 characters\n",
+     "Images found: 49\n",
+     "\n",
+     "Crawl4AI (with JavaScript execution):\n",
+     "Time taken: 9.13 seconds\n",
+     "Content length: 34243 characters\n",
+     "Images found: 89\n"
+    ]
+   }
+  ],
+  "source": [
+   "import os\n",
+   "from google.colab import userdata\n",
+   "os.environ['FIRECRAWL_API_KEY'] = userdata.get('FIRECRAWL_API_KEY')\n",
    "import time\n",
+   "from firecrawl import FirecrawlApp\n",
    "\n",
    "async def speed_comparison():\n",
    "    # Simulated Firecrawl performance\n",
+   "    app = FirecrawlApp(api_key=os.environ['FIRECRAWL_API_KEY'])\n",
+   "    start = time.time()\n",
+   "    scrape_status = app.scrape_url(\n",
+   "        'https://www.nbcnews.com/business',\n",
+   "        params={'formats': ['markdown', 'html']}\n",
+   "    )\n",
+   "    end = time.time()\n",
    "    print(\"Firecrawl (simulated):\")\n",
-   "    print(\"Time taken: 7.02 seconds\")\n",
+   "    print(f\"Time taken: {end - start:.2f} seconds\")\n",
-   "    print(\"Content length: 42074 characters\")\n",
+   "    print(f\"Content length: {len(scrape_status['markdown'])} characters\")\n",
-   "    print(\"Images found: 49\")\n",
+   "    print(f\"Images found: {scrape_status['markdown'].count('cldnry.s-nbcnews.com')}\")\n",
    "    print()\n",
    "\n",
    "    async with AsyncWebCrawler() as crawler:\n",
@@ -355,7 +642,7 @@
" result = await crawler.arun(\n", " result = await crawler.arun(\n",
" url=\"https://www.nbcnews.com/business\",\n", " url=\"https://www.nbcnews.com/business\",\n",
" word_count_threshold=0,\n", " word_count_threshold=0,\n",
" bypass_cache=True, \n", " bypass_cache=True,\n",
" verbose=False\n", " verbose=False\n",
" )\n", " )\n",
" end = time.time()\n", " end = time.time()\n",
@@ -371,7 +658,7 @@
" url=\"https://www.nbcnews.com/business\",\n", " url=\"https://www.nbcnews.com/business\",\n",
" js_code=[\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"],\n", " js_code=[\"const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();\"],\n",
" word_count_threshold=0,\n", " word_count_threshold=0,\n",
" bypass_cache=True, \n", " bypass_cache=True,\n",
" verbose=False\n", " verbose=False\n",
" )\n", " )\n",
" end = time.time()\n", " end = time.time()\n",
@@ -385,10 +672,12 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "OBFFYVJIyZQN"
+  },
   "source": [
-   "As you can see, Crawl4AI outperforms Firecrawl significantly:\n",
+   "If you run this on a local machine with a fast, stable internet connection:\n",
-   "- Simple crawl: Crawl4AI is typically over 4 times faster than Firecrawl.\n",
+   "- Simple crawl: Crawl4AI is typically 3 to 4 times faster than Firecrawl.\n",
    "- With JavaScript execution: Even when executing JavaScript to load more content (potentially doubling the number of images found), Crawl4AI is still faster than Firecrawl's simple crawl.\n",
    "\n",
    "Please note that actual performance may vary depending on network conditions and the specific content being crawled."
@@ -396,7 +685,9 @@
  },
  {
   "cell_type": "markdown",
-  "metadata": {},
+  "metadata": {
+   "id": "A6_1RK1_yZQO"
+  },
   "source": [
    "## Conclusion\n",
    "\n",
@@ -435,8 +726,11 @@
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython3", "pygments_lexer": "ipython3",
"version": "3.8.10" "version": "3.8.10"
},
"colab": {
"provenance": []
} }
}, },
"nbformat": 4, "nbformat": 4,
"nbformat_minor": 4 "nbformat_minor": 0
} }

View File

@@ -0,0 +1,242 @@
import asyncio
import time
import json
import os
import re
from bs4 import BeautifulSoup
from pydantic import BaseModel, Field
from crawl4ai import AsyncWebCrawler
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy, LLMExtractionStrategy
print("Crawl4AI: Advanced Web Crawling and Data Extraction")
print("GitHub Repository: https://github.com/unclecode/crawl4ai")
print("Twitter: @unclecode")
print("Website: https://crawl4ai.com")


async def simple_crawl():
    print("\n--- Basic Usage ---")
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(url="https://www.nbcnews.com/business")
        print(result.markdown[:500])  # Print first 500 characters


async def js_and_css():
    print("\n--- Executing JavaScript and Using CSS Selectors ---")
    async with AsyncWebCrawler(verbose=True) as crawler:
        js_code = ["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"]
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            js_code=js_code,
            css_selector="article.tease-card",
            bypass_cache=True
        )
        print(result.extracted_content[:500])  # Print first 500 characters


async def use_proxy():
    print("\n--- Using a Proxy ---")
    print("Note: Replace 'http://your-proxy-url:port' with a working proxy to run this example.")
    # Uncomment and modify the following lines to use a proxy
    # async with AsyncWebCrawler(verbose=True, proxy="http://your-proxy-url:port") as crawler:
    #     result = await crawler.arun(
    #         url="https://www.nbcnews.com/business",
    #         bypass_cache=True
    #     )
    #     print(result.markdown[:500])  # Print first 500 characters


class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")


async def extract_openai_fees():
    print("\n--- Extracting Structured Data with OpenAI ---")
    print("Note: Set your OpenAI API key as an environment variable to run this example.")
    if not os.getenv('OPENAI_API_KEY'):
        print("OpenAI API key not found. Skipping this example.")
        return
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url='https://openai.com/api/pricing/',
            word_count_threshold=1,
            extraction_strategy=LLMExtractionStrategy(
                provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
                schema=OpenAIModelFee.schema(),
                extraction_type="schema",
                instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
                Do not miss any models in the entire content. One extracted model JSON format should look like this:
                {"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
            ),
            bypass_cache=True,
        )
        print(result.extracted_content)


async def crawl_typescript_commits():
    print("\n--- Advanced Multi-Page Crawling with JavaScript Execution ---")
    first_commit = ""

    async def on_execution_started(page):
        nonlocal first_commit
        try:
            while True:
                await page.wait_for_selector('li.Box-sc-g0xbh4-0 h4')
                commit = await page.query_selector('li.Box-sc-g0xbh4-0 h4')
                commit = await commit.evaluate('(element) => element.textContent')
                commit = re.sub(r'\s+', '', commit)
                if commit and commit != first_commit:
                    first_commit = commit
                    break
                await asyncio.sleep(0.5)
        except Exception as e:
            print(f"Warning: New content didn't appear after JavaScript execution: {e}")

    async with AsyncWebCrawler(verbose=True) as crawler:
        crawler.crawler_strategy.set_hook('on_execution_started', on_execution_started)
        url = "https://github.com/microsoft/TypeScript/commits/main"
        session_id = "typescript_commits_session"
        all_commits = []
        # JavaScript that clicks the "Next" pagination button in place.
        js_next_page = """
        const button = document.querySelector('a[data-testid="pagination-next-button"]');
        if (button) button.click();
        """
        for page in range(3):  # Crawl 3 pages
            result = await crawler.arun(
                url=url,
                session_id=session_id,
                css_selector="li.Box-sc-g0xbh4-0",
                js=js_next_page if page > 0 else None,
                bypass_cache=True,
                js_only=page > 0
            )
            assert result.success, f"Failed to crawl page {page + 1}"
            soup = BeautifulSoup(result.cleaned_html, 'html.parser')
            commits = soup.select("li")
            all_commits.extend(commits)
            print(f"Page {page + 1}: Found {len(commits)} commits")
        await crawler.crawler_strategy.kill_session(session_id)
        print(f"Successfully crawled {len(all_commits)} commits across 3 pages")


async def extract_news_teasers():
    print("\n--- Using JsonCssExtractionStrategy for Fast Structured Output ---")
    schema = {
        "name": "News Teaser Extractor",
        "baseSelector": ".wide-tease-item__wrapper",
        "fields": [
            {
                "name": "category",
                "selector": ".unibrow span[data-testid='unibrow-text']",
                "type": "text",
            },
            {
                "name": "headline",
                "selector": ".wide-tease-item__headline",
                "type": "text",
            },
            {
                "name": "summary",
                "selector": ".wide-tease-item__description",
                "type": "text",
            },
            {
                "name": "time",
                "selector": "[data-testid='wide-tease-date']",
                "type": "text",
            },
            {
                "name": "image",
                "type": "nested",
                "selector": "picture.teasePicture img",
                "fields": [
                    {"name": "src", "type": "attribute", "attribute": "src"},
                    {"name": "alt", "type": "attribute", "attribute": "alt"},
                ],
            },
            {
                "name": "link",
                "selector": "a[href]",
                "type": "attribute",
                "attribute": "href",
            },
        ],
    }
    extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
    async with AsyncWebCrawler(verbose=True) as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            extraction_strategy=extraction_strategy,
            bypass_cache=True,
        )
        assert result.success, "Failed to crawl the page"
        news_teasers = json.loads(result.extracted_content)
        print(f"Successfully extracted {len(news_teasers)} news teasers")
        print(json.dumps(news_teasers[0], indent=2))


async def speed_comparison():
    print("\n--- Speed Comparison ---")
    print("Firecrawl (simulated):")
    print("Time taken: 7.02 seconds")
    print("Content length: 42074 characters")
    print("Images found: 49")
    print()
    async with AsyncWebCrawler() as crawler:
        # Crawl4AI simple crawl
        start = time.time()
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            word_count_threshold=0,
            bypass_cache=True,
            verbose=False
        )
        end = time.time()
        print("Crawl4AI (simple crawl):")
        print(f"Time taken: {end - start:.2f} seconds")
        print(f"Content length: {len(result.markdown)} characters")
        print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
        print()
        # Crawl4AI with JavaScript execution
        start = time.time()
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
            js_code=["const loadMoreButton = Array.from(document.querySelectorAll('button')).find(button => button.textContent.includes('Load More')); loadMoreButton && loadMoreButton.click();"],
            word_count_threshold=0,
            bypass_cache=True,
            verbose=False
        )
        end = time.time()
        print("Crawl4AI (with JavaScript execution):")
        print(f"Time taken: {end - start:.2f} seconds")
        print(f"Content length: {len(result.markdown)} characters")
        print(f"Images found: {result.markdown.count('cldnry.s-nbcnews.com')}")
    print("\nNote on Speed Comparison:")
    print("The speed test conducted here may not reflect optimal conditions.")
    print("When we call Firecrawl's API, we're seeing its best performance,")
    print("while Crawl4AI's performance is limited by the local network speed.")
    print("For a more accurate comparison, it's recommended to run these tests")
    print("on servers with a stable and fast internet connection.")
    print("Despite these limitations, Crawl4AI still demonstrates faster performance.")
    print("If you run these tests in an environment with better network conditions,")
    print("you may observe an even more significant speed advantage for Crawl4AI.")


async def main():
    await simple_crawl()
    await js_and_css()
    await use_proxy()
    await extract_openai_fees()
    await crawl_typescript_commits()
    await extract_news_teasers()
    await speed_comparison()


if __name__ == "__main__":
    asyncio.run(main())