#1156: Refactor completion function calls to use asynchronous version

This commit is contained in:
Ahmed-Tawfik94
2025-05-27 15:10:34 +08:00
parent d9b3db925a
commit 2b2ef12e25
2 changed files with 8 additions and 8 deletions

View File

@@ -24,7 +24,7 @@ from crawl4ai import (
 RateLimiter,
 LLMConfig
 )
-from crawl4ai.utils import perform_completion_with_backoff
+from crawl4ai.utils import aperform_completion_with_backoff
 from crawl4ai.content_filter_strategy import (
 PruningContentFilter,
 BM25ContentFilter,
@@ -88,7 +88,7 @@ async def handle_llm_qa(
 Answer:"""
-response = perform_completion_with_backoff(
+response = await aperform_completion_with_backoff(
 provider=config["llm"]["provider"],
 prompt_with_variables=prompt,
 api_token=os.environ.get(config["llm"].get("api_key_env", ""))

View File

@@ -3553,7 +3553,7 @@ from .utils import * # noqa: F403
 from .utils import (
 sanitize_html,
 escape_json_string,
-perform_completion_with_backoff,
+aperform_completion_with_backoff,
 extract_xml_data,
 split_and_parse_json_objects,
 sanitize_input_encode,
@@ -4162,7 +4162,7 @@ class LLMExtractionStrategy(ExtractionStrategy):
 )
 try:
-response = perform_completion_with_backoff(
+response = await aperform_completion_with_backoff(
 self.llm_config.provider,
 prompt_with_variables,
 self.llm_config.api_token,
@@ -4646,7 +4646,7 @@ class JsonElementExtractionStrategy(ExtractionStrategy):
 dict: Generated schema following the JsonElementExtractionStrategy format
 """
 from .prompts import JSON_SCHEMA_BUILDER
-from .utils import perform_completion_with_backoff
+from .utils import aperform_completion_with_backoff
 for name, message in JsonElementExtractionStrategy._GENERATE_SCHEMA_UNWANTED_PROPS.items():
 if locals()[name] is not None:
 raise AttributeError(f"Setting '{name}' is deprecated. {message}")
@@ -4709,7 +4709,7 @@ In this scenario, use your best judgment to generate the schema. You need to exa
 try:
 # Call LLM with backoff handling
-response = perform_completion_with_backoff(
+response = await aperform_completion_with_backoff(
 provider=llm_config.provider,
 prompt_with_variables="\n\n".join([system_message["content"], user_message["content"]]),
 json_response = True,
@@ -5597,7 +5597,7 @@ from bs4 import NavigableString, Comment
 from .utils import (
 clean_tokens,
-perform_completion_with_backoff,
+aperform_completion_with_backoff,
 escape_json_string,
 sanitize_html,
 get_home_folder,
@@ -6556,7 +6556,7 @@ class LLMContentFilter(RelevantContentFilter):
 tag="CHUNK",
 params={"chunk_num": i + 1},
 )
-return perform_completion_with_backoff(
+return await aperform_completion_with_backoff(
 provider,
 prompt,
 api_token,