Merged
12 changes: 9 additions & 3 deletions backend/modules/llm/litellm_caller.py
@@ -85,11 +85,13 @@ def _get_model_kwargs(self, model_name: str, temperature: Optional[float] = None
        except ValueError as e:
            logger.error(f"Failed to resolve API key for model {model_name}: {e}")
            raise

        if api_key:
            # Always pass api_key to LiteLLM for all providers
            kwargs["api_key"] = api_key

            # Additionally set provider-specific env vars for LiteLLM's internal logic
            if "openrouter" in model_config.model_url:
                kwargs["api_key"] = api_key
                # LiteLLM will automatically set the correct env var
                os.environ["OPENROUTER_API_KEY"] = api_key
            elif "openai" in model_config.model_url:
                os.environ["OPENAI_API_KEY"] = api_key
@@ -99,6 +101,10 @@ def _get_model_kwargs(self, model_name: str, temperature: Optional[float] = None
os.environ["GOOGLE_API_KEY"] = api_key
elif "cerebras" in model_config.model_url:
os.environ["CEREBRAS_API_KEY"] = api_key
else:
# Custom endpoint - set OPENAI_API_KEY as fallback
# (most custom endpoints are OpenAI-compatible)
os.environ["OPENAI_API_KEY"] = api_key

# Set custom API base for non-standard endpoints
if hasattr(model_config, 'model_url') and model_config.model_url:
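
For illustration, a minimal sketch of how these kwargs are consumed downstream. The `caller` instance and model string here are hypothetical, not part of this diff; `litellm.completion` accepts `api_key`/`api_base` keyword arguments, so the resolved key travels explicitly rather than via whichever env var a given provider expects:

```python
import litellm

# Hypothetical caller configured with a custom OpenAI-compatible endpoint.
kwargs = caller._get_model_kwargs("custom-model")
response = litellm.completion(
    model="openai/custom-model-name",  # illustrative model string
    messages=[{"role": "user", "content": "ping"}],
    **kwargs,  # carries api_key and api_base for the custom endpoint
)
print(response.choices[0].message.content)
```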
140 changes: 137 additions & 3 deletions backend/tests/test_llm_env_expansion.py
@@ -146,14 +146,148 @@ def test_litellm_caller_handles_literal_extra_headers(self):
                )
            }
        )

        # Create LiteLLMCaller
        caller = LiteLLMCaller(llm_config, debug_mode=True)

        # Get model kwargs - this should work without errors
        model_kwargs = caller._get_model_kwargs("test-model")

        # Verify that extra_headers were passed through
        assert "extra_headers" in model_kwargs
        assert model_kwargs["extra_headers"]["HTTP-Referer"] == "https://literal-app.com"
        assert model_kwargs["extra_headers"]["X-Title"] == "LiteralApp"

    def test_custom_endpoint_with_env_var_api_key(self, monkeypatch):
        """Custom endpoint should pass api_key in kwargs when using env var."""
        monkeypatch.setenv("CUSTOM_LLM_KEY", "sk-custom-12345")

        # Create LLM config for custom endpoint with env var in api_key
        llm_config = LLMConfig(
            models={
                "custom-model": ModelConfig(
                    model_name="custom-model-name",
                    model_url="https://custom-llm.example.com/v1",
                    api_key="${CUSTOM_LLM_KEY}"
                )
            }
        )

        # Create LiteLLMCaller
        caller = LiteLLMCaller(llm_config, debug_mode=True)

        # Get model kwargs
        model_kwargs = caller._get_model_kwargs("custom-model")

        # Verify that api_key is in kwargs (critical for custom endpoints)
        assert "api_key" in model_kwargs
        assert model_kwargs["api_key"] == "sk-custom-12345"

        # Verify that api_base is set for custom endpoint
        assert "api_base" in model_kwargs
        assert model_kwargs["api_base"] == "https://custom-llm.example.com/v1"

        # Verify fallback env var is set for OpenAI-compatible endpoints
        import os
        assert os.environ.get("OPENAI_API_KEY") == "sk-custom-12345"

    def test_custom_endpoint_with_literal_api_key(self):
        """Custom endpoint should pass api_key in kwargs when using literal value."""
        # Create LLM config for custom endpoint with literal api_key
        llm_config = LLMConfig(
            models={
                "custom-model": ModelConfig(
                    model_name="custom-model-name",
                    model_url="https://custom-llm.example.com/v1",
                    api_key="sk-literal-custom-key"
                )
            }
        )

        # Create LiteLLMCaller
        caller = LiteLLMCaller(llm_config, debug_mode=True)

        # Get model kwargs
        model_kwargs = caller._get_model_kwargs("custom-model")

        # Verify that api_key is in kwargs (critical for custom endpoints)
        assert "api_key" in model_kwargs
        assert model_kwargs["api_key"] == "sk-literal-custom-key"

        # Verify that api_base is set for custom endpoint
        assert "api_base" in model_kwargs
        assert model_kwargs["api_base"] == "https://custom-llm.example.com/v1"
Comment on lines +203 to +228
Copilot AI Nov 24, 2025

Test isolation issue: This test modifies os.environ["OPENAI_API_KEY"] via the implementation code (line 107 in litellm_caller.py) but doesn't clean it up, which could pollute other tests. Consider using monkeypatch fixture to ensure the environment is restored after the test, similar to test_custom_endpoint_with_env_var_api_key.
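
One way to apply the suggestion, sketched under the config shape used elsewhere in this file (a sketch, not the PR's code): take `monkeypatch` and pre-touch the variable so pytest snapshots its original state and restores it at teardown, even though the implementation overwrites it mid-test.

```python
def test_custom_endpoint_with_literal_api_key(self, monkeypatch):
    """Custom endpoint should pass api_key in kwargs when using literal value."""
    # Pre-touch the variable: monkeypatch records its original state and
    # undoes whatever the implementation writes to it during the test.
    monkeypatch.setenv("OPENAI_API_KEY", "sentinel-value")

    llm_config = LLMConfig(
        models={
            "custom-model": ModelConfig(
                model_name="custom-model-name",
                model_url="https://custom-llm.example.com/v1",
                api_key="sk-literal-custom-key",
            )
        }
    )
    caller = LiteLLMCaller(llm_config, debug_mode=True)
    model_kwargs = caller._get_model_kwargs("custom-model")

    assert model_kwargs["api_key"] == "sk-literal-custom-key"
```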

    def test_custom_endpoint_with_extra_headers(self, monkeypatch):
        """Custom endpoint should handle extra_headers correctly."""
        monkeypatch.setenv("CUSTOM_API_KEY", "sk-custom-auth")
        monkeypatch.setenv("CUSTOM_TENANT", "tenant-123")

        # Create LLM config for custom endpoint with extra headers
        llm_config = LLMConfig(
            models={
                "custom-model": ModelConfig(
                    model_name="custom-model-name",
                    model_url="https://custom-llm.example.com/v1",
                    api_key="${CUSTOM_API_KEY}",
                    extra_headers={
                        "X-Tenant-ID": "${CUSTOM_TENANT}",
                        "X-Custom-Header": "custom-value"
                    }
                )
            }
        )

        # Create LiteLLMCaller
        caller = LiteLLMCaller(llm_config, debug_mode=True)

        # Get model kwargs
        model_kwargs = caller._get_model_kwargs("custom-model")

        # Verify api_key is passed
        assert "api_key" in model_kwargs
        assert model_kwargs["api_key"] == "sk-custom-auth"

        # Verify extra_headers are resolved and passed
        assert "extra_headers" in model_kwargs
        assert model_kwargs["extra_headers"]["X-Tenant-ID"] == "tenant-123"
        assert model_kwargs["extra_headers"]["X-Custom-Header"] == "custom-value"

        # Verify api_base is set
        assert "api_base" in model_kwargs

    def test_known_providers_still_get_api_key_in_kwargs(self):
        """Verify that known providers also get api_key in kwargs (backward compatibility)."""
        # Test OpenAI
        llm_config = LLMConfig(
            models={
                "openai-model": ModelConfig(
                    model_name="gpt-4",
                    model_url="https://api.openai.com/v1",
                    api_key="sk-openai-test"
                )
            }
        )
        caller = LiteLLMCaller(llm_config, debug_mode=True)
        model_kwargs = caller._get_model_kwargs("openai-model")

        # OpenAI should get api_key in kwargs
        assert "api_key" in model_kwargs
        assert model_kwargs["api_key"] == "sk-openai-test"

        # Test OpenRouter
        llm_config = LLMConfig(
            models={
                "openrouter-model": ModelConfig(
                    model_name="meta-llama/llama-3-70b",
                    model_url="https://openrouter.ai/api/v1",
                    api_key="sk-or-test"
                )
            }
        )
        caller = LiteLLMCaller(llm_config, debug_mode=True)
        model_kwargs = caller._get_model_kwargs("openrouter-model")

        # OpenRouter should get api_key in kwargs
        assert "api_key" in model_kwargs
        assert model_kwargs["api_key"] == "sk-or-test"

Copilot AI Nov 24, 2025

Test isolation issue: This test modifies environment variables (OPENAI_API_KEY, OPENROUTER_API_KEY) via the implementation code but doesn't use monkeypatch to clean them up. This could pollute other tests. Consider adding monkeypatch as a parameter and using it to set/verify environment variables to ensure proper cleanup.
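
The same pattern would cover both variables here; a brief sketch (hypothetical, mirroring the suggestion above):

```python
def test_known_providers_still_get_api_key_in_kwargs(self, monkeypatch):
    """Verify that known providers also get api_key in kwargs (backward compatibility)."""
    # Snapshot both variables the implementation may write so pytest
    # restores them at teardown.
    monkeypatch.setenv("OPENAI_API_KEY", "sentinel")
    monkeypatch.setenv("OPENROUTER_API_KEY", "sentinel")
    # ... rest of the test body unchanged ...
```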
