From 95051020f4f1888ebb431805e9b36168c8ddb7e0 Mon Sep 17 00:00:00 2001 From: ntohidi Date: Thu, 21 Aug 2025 14:01:04 +0800 Subject: [PATCH] fix(docker): Fix LLM API key handling for multi-provider support Previously, the system incorrectly used OPENAI_API_KEY for all LLM providers due to a hardcoded api_key_env fallback in config.yml. This caused authentication errors when using non-OpenAI providers like Gemini. Changes: - Remove api_key_env from config.yml to let litellm handle provider-specific env vars - Simplify get_llm_api_key() to return None, allowing litellm to auto-detect keys - Update validate_llm_provider() to trust litellm's built-in key detection - Update documentation to reflect the new automatic key handling The fix leverages litellm's existing capability to automatically find the correct environment variable for each provider (OPENAI_API_KEY, GEMINI_API_TOKEN, etc.) without manual configuration. ref #1291 --- deploy/docker/README.md | 3 +-- deploy/docker/api.py | 6 +++--- deploy/docker/config.yml | 3 +-- deploy/docker/utils.py | 29 ++++++++++------------------ docs/md_v2/core/docker-deployment.md | 5 ++--- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/deploy/docker/README.md b/deploy/docker/README.md index 49e0030b..d35050cc 100644 --- a/deploy/docker/README.md +++ b/deploy/docker/README.md @@ -692,8 +692,7 @@ app: # Default LLM Configuration llm: provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var - api_key_env: "OPENAI_API_KEY" - # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored + # api_key: sk-... 
# If you pass the API key directly (not recommended) # Redis Configuration (Used by internal Redis server managed by supervisord) redis: diff --git a/deploy/docker/api.py b/deploy/docker/api.py index 627d0bf4..c01c5ca7 100644 --- a/deploy/docker/api.py +++ b/deploy/docker/api.py @@ -96,7 +96,7 @@ async def handle_llm_qa( response = perform_completion_with_backoff( provider=config["llm"]["provider"], prompt_with_variables=prompt, - api_token=get_llm_api_key(config) + api_token=get_llm_api_key(config) # Returns None to let litellm handle it ) return response.choices[0].message.content @@ -127,7 +127,7 @@ async def process_llm_extraction( "error": error_msg }) return - api_key = get_llm_api_key(config, provider) + api_key = get_llm_api_key(config, provider) # Returns None to let litellm handle it llm_strategy = LLMExtractionStrategy( llm_config=LLMConfig( provider=provider or config["llm"]["provider"], @@ -203,7 +203,7 @@ async def handle_markdown_request( FilterType.LLM: LLMContentFilter( llm_config=LLMConfig( provider=provider or config["llm"]["provider"], - api_token=get_llm_api_key(config, provider), + api_token=get_llm_api_key(config, provider), # Returns None to let litellm handle it ), instruction=query or "Extract main content" ) diff --git a/deploy/docker/config.yml b/deploy/docker/config.yml index c81badc4..f5046613 100644 --- a/deploy/docker/config.yml +++ b/deploy/docker/config.yml @@ -11,8 +11,7 @@ app: # Default LLM Configuration llm: provider: "openai/gpt-4o-mini" - api_key_env: "OPENAI_API_KEY" - # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored + # api_key: sk-... 
# If you pass the API key directly (not recommended) # Redis Configuration redis: diff --git a/deploy/docker/utils.py b/deploy/docker/utils.py index 2e2a80ac..8ec591e5 100644 --- a/deploy/docker/utils.py +++ b/deploy/docker/utils.py @@ -71,7 +71,7 @@ def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]: -def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> str: +def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> Optional[str]: """Get the appropriate API key based on the LLM provider. Args: @@ -79,19 +79,14 @@ def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> str: provider: Optional provider override (e.g., "openai/gpt-4") Returns: - The API key for the provider, or empty string if not found + The API key if directly configured, otherwise None to let litellm handle it """ - - # Use provided provider or fall back to config - if not provider: - provider = config["llm"]["provider"] - - # Check if direct API key is configured + # Check if direct API key is configured (for backward compatibility) if "api_key" in config["llm"]: return config["llm"]["api_key"] - # Fall back to the configured api_key_env if no match - return os.environ.get(config["llm"].get("api_key_env", ""), "") + # Return None - litellm will automatically find the right environment variable + return None def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]: @@ -104,16 +99,12 @@ def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple Returns: Tuple of (is_valid, error_message) """ - # Use provided provider or fall back to config - if not provider: - provider = config["llm"]["provider"] - - # Get the API key for this provider - api_key = get_llm_api_key(config, provider) - - if not api_key: - return False, f"No API key found for provider '{provider}'. Please set the appropriate environment variable." 
+ # If a direct API key is configured, validation passes + if "api_key" in config["llm"]: + return True, "" + # Otherwise, trust that litellm will find the appropriate environment variable + # We can't easily validate this without reimplementing litellm's logic return True, "" diff --git a/docs/md_v2/core/docker-deployment.md b/docs/md_v2/core/docker-deployment.md index 6e9a9704..deda8163 100644 --- a/docs/md_v2/core/docker-deployment.md +++ b/docs/md_v2/core/docker-deployment.md @@ -176,7 +176,7 @@ The Docker setup now supports flexible LLM provider configuration through three 3. **Config File Default**: Falls back to `config.yml` (default: `openai/gpt-4o-mini`) -The system automatically selects the appropriate API key based on the configured `api_key_env` in the config file. +The system automatically selects the appropriate API key based on the provider. LiteLLM handles finding the correct environment variable for each provider (e.g., OPENAI_API_KEY for OpenAI, GEMINI_API_KEY for Google Gemini, etc.). #### 3. Build and Run with Compose @@ -693,8 +693,7 @@ app: # Default LLM Configuration llm: provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var - api_key_env: "OPENAI_API_KEY" - # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored + # api_key: sk-... # If you pass the API key directly (not recommended) # Redis Configuration (Used by internal Redis server managed by supervisord) redis: