-
Notifications
You must be signed in to change notification settings - Fork 5
feat(llm): enhance LiteLLMCaller to support custom API key handling a… #118
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
c44ab9a
7fe4f54
63f6f31
fe32d2e
027e58c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -146,14 +146,148 @@ def test_litellm_caller_handles_literal_extra_headers(self): | |
| ) | ||
| } | ||
| ) | ||
|
|
||
| # Create LiteLLMCaller | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
|
|
||
| # Get model kwargs - this should work without errors | ||
| model_kwargs = caller._get_model_kwargs("test-model") | ||
|
|
||
| # Verify that extra_headers were passed through | ||
| assert "extra_headers" in model_kwargs | ||
| assert model_kwargs["extra_headers"]["HTTP-Referer"] == "https://literal-app.com" | ||
| assert model_kwargs["extra_headers"]["X-Title"] == "LiteralApp" | ||
|
|
||
| def test_custom_endpoint_with_env_var_api_key(self, monkeypatch): | ||
| """Custom endpoint should pass api_key in kwargs when using env var.""" | ||
| monkeypatch.setenv("CUSTOM_LLM_KEY", "sk-custom-12345") | ||
|
|
||
| # Create LLM config for custom endpoint with env var in api_key | ||
| llm_config = LLMConfig( | ||
| models={ | ||
| "custom-model": ModelConfig( | ||
| model_name="custom-model-name", | ||
| model_url="https://custom-llm.example.com/v1", | ||
| api_key="${CUSTOM_LLM_KEY}" | ||
| ) | ||
| } | ||
| ) | ||
|
|
||
| # Create LiteLLMCaller | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
|
|
||
| # Get model kwargs | ||
| model_kwargs = caller._get_model_kwargs("custom-model") | ||
|
|
||
| # Verify that api_key is in kwargs (critical for custom endpoints) | ||
| assert "api_key" in model_kwargs | ||
| assert model_kwargs["api_key"] == "sk-custom-12345" | ||
|
|
||
| # Verify that api_base is set for custom endpoint | ||
| assert "api_base" in model_kwargs | ||
| assert model_kwargs["api_base"] == "https://custom-llm.example.com/v1" | ||
|
|
||
| # Verify fallback env var is set for OpenAI-compatible endpoints | ||
| import os | ||
| assert os.environ.get("OPENAI_API_KEY") == "sk-custom-12345" | ||
|
|
||
| def test_custom_endpoint_with_literal_api_key(self): | ||
| """Custom endpoint should pass api_key in kwargs when using literal value.""" | ||
| # Create LLM config for custom endpoint with literal api_key | ||
| llm_config = LLMConfig( | ||
| models={ | ||
| "custom-model": ModelConfig( | ||
| model_name="custom-model-name", | ||
| model_url="https://custom-llm.example.com/v1", | ||
| api_key="sk-literal-custom-key" | ||
| ) | ||
| } | ||
| ) | ||
|
|
||
| # Create LiteLLMCaller | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
|
|
||
| # Get model kwargs | ||
| model_kwargs = caller._get_model_kwargs("custom-model") | ||
|
|
||
| # Verify that api_key is in kwargs (critical for custom endpoints) | ||
| assert "api_key" in model_kwargs | ||
| assert model_kwargs["api_key"] == "sk-literal-custom-key" | ||
|
|
||
| # Verify that api_base is set for custom endpoint | ||
| assert "api_base" in model_kwargs | ||
| assert model_kwargs["api_base"] == "https://custom-llm.example.com/v1" | ||
|
|
||
| def test_custom_endpoint_with_extra_headers(self, monkeypatch): | ||
| """Custom endpoint should handle extra_headers correctly.""" | ||
| monkeypatch.setenv("CUSTOM_API_KEY", "sk-custom-auth") | ||
| monkeypatch.setenv("CUSTOM_TENANT", "tenant-123") | ||
|
|
||
| # Create LLM config for custom endpoint with extra headers | ||
| llm_config = LLMConfig( | ||
| models={ | ||
| "custom-model": ModelConfig( | ||
| model_name="custom-model-name", | ||
| model_url="https://custom-llm.example.com/v1", | ||
| api_key="${CUSTOM_API_KEY}", | ||
| extra_headers={ | ||
| "X-Tenant-ID": "${CUSTOM_TENANT}", | ||
| "X-Custom-Header": "custom-value" | ||
| } | ||
| ) | ||
| } | ||
| ) | ||
|
|
||
| # Create LiteLLMCaller | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
|
|
||
| # Get model kwargs | ||
| model_kwargs = caller._get_model_kwargs("custom-model") | ||
|
|
||
| # Verify api_key is passed | ||
| assert "api_key" in model_kwargs | ||
| assert model_kwargs["api_key"] == "sk-custom-auth" | ||
|
|
||
| # Verify extra_headers are resolved and passed | ||
| assert "extra_headers" in model_kwargs | ||
| assert model_kwargs["extra_headers"]["X-Tenant-ID"] == "tenant-123" | ||
| assert model_kwargs["extra_headers"]["X-Custom-Header"] == "custom-value" | ||
|
|
||
| # Verify api_base is set | ||
| assert "api_base" in model_kwargs | ||
|
|
||
| def test_known_providers_still_get_api_key_in_kwargs(self): | ||
| """Verify that known providers also get api_key in kwargs (backward compatibility).""" | ||
| # Test OpenAI | ||
| llm_config = LLMConfig( | ||
| models={ | ||
| "openai-model": ModelConfig( | ||
| model_name="gpt-4", | ||
| model_url="https://api.openai.com/v1", | ||
| api_key="sk-openai-test" | ||
| ) | ||
| } | ||
| ) | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
| model_kwargs = caller._get_model_kwargs("openai-model") | ||
|
|
||
| # OpenAI should get api_key in kwargs | ||
| assert "api_key" in model_kwargs | ||
| assert model_kwargs["api_key"] == "sk-openai-test" | ||
|
|
||
| # Test OpenRouter | ||
| llm_config = LLMConfig( | ||
| models={ | ||
| "openrouter-model": ModelConfig( | ||
| model_name="meta-llama/llama-3-70b", | ||
| model_url="https://openrouter.ai/api/v1", | ||
| api_key="sk-or-test" | ||
| ) | ||
| } | ||
| ) | ||
| caller = LiteLLMCaller(llm_config, debug_mode=True) | ||
| model_kwargs = caller._get_model_kwargs("openrouter-model") | ||
|
|
||
| # OpenRouter should get api_key in kwargs | ||
| assert "api_key" in model_kwargs | ||
| assert model_kwargs["api_key"] == "sk-or-test" | ||
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Test isolation issue: This test modifies
os.environ["OPENAI_API_KEY"] via the implementation code (line 107 in litellm_caller.py) but doesn't clean it up, which could pollute other tests. Consider using the monkeypatch fixture to ensure the environment is restored after the test, similar to test_custom_endpoint_with_env_var_api_key.