fix(docker): Fix LLM API key handling for multi-provider support
Previously, the system incorrectly used OPENAI_API_KEY for all LLM providers due to a hardcoded api_key_env fallback in config.yml. This caused authentication errors when using non-OpenAI providers like Gemini. Changes: - Remove api_key_env from config.yml to let litellm handle provider-specific env vars - Simplify get_llm_api_key() to return None, allowing litellm to auto-detect keys - Update validate_llm_provider() to trust litellm's built-in key detection - Update documentation to reflect the new automatic key handling The fix leverages litellm's existing capability to automatically find the correct environment variable for each provider (OPENAI_API_KEY, GEMINI_API_KEY, etc.) without manual configuration. ref #1291
This commit is contained in:
@@ -692,8 +692,7 @@ app:
|
|||||||
# Default LLM Configuration
|
# Default LLM Configuration
|
||||||
llm:
|
llm:
|
||||||
provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
|
provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
|
||||||
api_key_env: "OPENAI_API_KEY"
|
# api_key: sk-... # If you pass the API key directly (not recommended)
|
||||||
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
|
||||||
|
|
||||||
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
||||||
redis:
|
redis:
|
||||||
|
|||||||
@@ -96,7 +96,7 @@ async def handle_llm_qa(
|
|||||||
response = perform_completion_with_backoff(
|
response = perform_completion_with_backoff(
|
||||||
provider=config["llm"]["provider"],
|
provider=config["llm"]["provider"],
|
||||||
prompt_with_variables=prompt,
|
prompt_with_variables=prompt,
|
||||||
api_token=get_llm_api_key(config)
|
api_token=get_llm_api_key(config) # Returns None to let litellm handle it
|
||||||
)
|
)
|
||||||
|
|
||||||
return response.choices[0].message.content
|
return response.choices[0].message.content
|
||||||
@@ -127,7 +127,7 @@ async def process_llm_extraction(
|
|||||||
"error": error_msg
|
"error": error_msg
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
api_key = get_llm_api_key(config, provider)
|
api_key = get_llm_api_key(config, provider) # Returns None to let litellm handle it
|
||||||
llm_strategy = LLMExtractionStrategy(
|
llm_strategy = LLMExtractionStrategy(
|
||||||
llm_config=LLMConfig(
|
llm_config=LLMConfig(
|
||||||
provider=provider or config["llm"]["provider"],
|
provider=provider or config["llm"]["provider"],
|
||||||
@@ -203,7 +203,7 @@ async def handle_markdown_request(
|
|||||||
FilterType.LLM: LLMContentFilter(
|
FilterType.LLM: LLMContentFilter(
|
||||||
llm_config=LLMConfig(
|
llm_config=LLMConfig(
|
||||||
provider=provider or config["llm"]["provider"],
|
provider=provider or config["llm"]["provider"],
|
||||||
api_token=get_llm_api_key(config, provider),
|
api_token=get_llm_api_key(config, provider), # Returns None to let litellm handle it
|
||||||
),
|
),
|
||||||
instruction=query or "Extract main content"
|
instruction=query or "Extract main content"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -11,8 +11,7 @@ app:
|
|||||||
# Default LLM Configuration
|
# Default LLM Configuration
|
||||||
llm:
|
llm:
|
||||||
provider: "openai/gpt-4o-mini"
|
provider: "openai/gpt-4o-mini"
|
||||||
api_key_env: "OPENAI_API_KEY"
|
# api_key: sk-... # If you pass the API key directly (not recommended)
|
||||||
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
|
||||||
|
|
||||||
# Redis Configuration
|
# Redis Configuration
|
||||||
redis:
|
redis:
|
||||||
|
|||||||
@@ -71,7 +71,7 @@ def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> str:
|
def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> Optional[str]:
|
||||||
"""Get the appropriate API key based on the LLM provider.
|
"""Get the appropriate API key based on the LLM provider.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -79,19 +79,14 @@ def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> str:
|
|||||||
provider: Optional provider override (e.g., "openai/gpt-4")
|
provider: Optional provider override (e.g., "openai/gpt-4")
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
The API key for the provider, or empty string if not found
|
The API key if directly configured, otherwise None to let litellm handle it
|
||||||
"""
|
"""
|
||||||
|
# Check if direct API key is configured (for backward compatibility)
|
||||||
# Use provided provider or fall back to config
|
|
||||||
if not provider:
|
|
||||||
provider = config["llm"]["provider"]
|
|
||||||
|
|
||||||
# Check if direct API key is configured
|
|
||||||
if "api_key" in config["llm"]:
|
if "api_key" in config["llm"]:
|
||||||
return config["llm"]["api_key"]
|
return config["llm"]["api_key"]
|
||||||
|
|
||||||
# Fall back to the configured api_key_env if no match
|
# Return None - litellm will automatically find the right environment variable
|
||||||
return os.environ.get(config["llm"].get("api_key_env", ""), "")
|
return None
|
||||||
|
|
||||||
|
|
||||||
def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]:
|
def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]:
|
||||||
@@ -104,16 +99,12 @@ def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple
|
|||||||
Returns:
|
Returns:
|
||||||
Tuple of (is_valid, error_message)
|
Tuple of (is_valid, error_message)
|
||||||
"""
|
"""
|
||||||
# Use provided provider or fall back to config
|
# If a direct API key is configured, validation passes
|
||||||
if not provider:
|
if "api_key" in config["llm"]:
|
||||||
provider = config["llm"]["provider"]
|
return True, ""
|
||||||
|
|
||||||
# Get the API key for this provider
|
|
||||||
api_key = get_llm_api_key(config, provider)
|
|
||||||
|
|
||||||
if not api_key:
|
|
||||||
return False, f"No API key found for provider '{provider}'. Please set the appropriate environment variable."
|
|
||||||
|
|
||||||
|
# Otherwise, trust that litellm will find the appropriate environment variable
|
||||||
|
# We can't easily validate this without reimplementing litellm's logic
|
||||||
return True, ""
|
return True, ""
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -176,7 +176,7 @@ The Docker setup now supports flexible LLM provider configuration through three
|
|||||||
|
|
||||||
3. **Config File Default**: Falls back to `config.yml` (default: `openai/gpt-4o-mini`)
|
3. **Config File Default**: Falls back to `config.yml` (default: `openai/gpt-4o-mini`)
|
||||||
|
|
||||||
The system automatically selects the appropriate API key based on the configured `api_key_env` in the config file.
|
The system automatically selects the appropriate API key based on the provider. LiteLLM handles finding the correct environment variable for each provider (e.g., OPENAI_API_KEY for OpenAI, GEMINI_API_KEY for Google Gemini, etc.).
|
||||||
|
|
||||||
#### 3. Build and Run with Compose
|
#### 3. Build and Run with Compose
|
||||||
|
|
||||||
@@ -693,8 +693,7 @@ app:
|
|||||||
# Default LLM Configuration
|
# Default LLM Configuration
|
||||||
llm:
|
llm:
|
||||||
provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
|
provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
|
||||||
api_key_env: "OPENAI_API_KEY"
|
# api_key: sk-... # If you pass the API key directly (not recommended)
|
||||||
# api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
|
|
||||||
|
|
||||||
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
# Redis Configuration (Used by internal Redis server managed by supervisord)
|
||||||
redis:
|
redis:
|
||||||
|
|||||||
Reference in New Issue
Block a user