Skip to content

Commit 7fe4f54

Browse files
committed
feat(llm): improve environment variable handling for API keys in LiteLLMCaller
1 parent c44ab9a commit 7fe4f54

File tree

2 files changed

+132
-12
lines changed

2 files changed

+132
-12
lines changed

backend/modules/llm/litellm_caller.py

Lines changed: 30 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,14 @@
2727

2828

2929
class LiteLLMCaller:
30-
"""Clean interface for all LLM calling patterns using LiteLLM."""
30+
"""Clean interface for all LLM calling patterns using LiteLLM.
31+
32+
Note: this class may set provider-specific LLM API key environment
33+
variables (for example ``OPENAI_API_KEY``) to maintain compatibility
34+
with LiteLLM's internal provider detection. These mutations are
35+
best-effort only and are not intended to provide strong isolation
36+
guarantees in multi-tenant or highly concurrent environments.
37+
"""
3138

3239
def __init__(self, llm_config=None, debug_mode: bool = False):
3340
"""Initialize with optional config dependency injection."""
@@ -91,20 +98,34 @@ def _get_model_kwargs(self, model_name: str, temperature: Optional[float] = None
9198
kwargs["api_key"] = api_key
9299

93100
# Additionally set provider-specific env vars for LiteLLM's internal logic
101+
def _set_env_var_if_needed(env_key: str, value: str) -> None:
102+
existing = os.environ.get(env_key)
103+
if existing is None:
104+
os.environ[env_key] = value
105+
elif existing != value:
106+
logger.warning(
107+
"Overwriting existing environment variable %s for model %s",
108+
env_key,
109+
model_name,
110+
)
111+
os.environ[env_key] = value
112+
94113
if "openrouter" in model_config.model_url:
95-
os.environ["OPENROUTER_API_KEY"] = api_key
114+
_set_env_var_if_needed("OPENROUTER_API_KEY", api_key)
96115
elif "openai" in model_config.model_url:
97-
os.environ["OPENAI_API_KEY"] = api_key
116+
_set_env_var_if_needed("OPENAI_API_KEY", api_key)
98117
elif "anthropic" in model_config.model_url:
99-
os.environ["ANTHROPIC_API_KEY"] = api_key
118+
_set_env_var_if_needed("ANTHROPIC_API_KEY", api_key)
100119
elif "google" in model_config.model_url:
101-
os.environ["GOOGLE_API_KEY"] = api_key
120+
_set_env_var_if_needed("GOOGLE_API_KEY", api_key)
102121
elif "cerebras" in model_config.model_url:
103-
os.environ["CEREBRAS_API_KEY"] = api_key
122+
_set_env_var_if_needed("CEREBRAS_API_KEY", api_key)
104123
else:
105-
# Custom endpoint - set OPENAI_API_KEY as fallback
106-
# (most custom endpoints are OpenAI-compatible)
107-
os.environ["OPENAI_API_KEY"] = api_key
124+
# Custom endpoint - set OPENAI_API_KEY as fallback for
126+
# OpenAI-compatible endpoints. This is a heuristic: the
127+
# env var is set when unset, and an existing different
128+
# value is overwritten with a logged warning.
128+
_set_env_var_if_needed("OPENAI_API_KEY", api_key)
108129

109130
# Set custom API base for non-standard endpoints
110131
if hasattr(model_config, 'model_url') and model_config.model_url:

backend/tests/test_llm_env_expansion.py

Lines changed: 102 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ def test_litellm_caller_resolves_api_key_env_var(self, monkeypatch):
3333
import os
3434
assert os.environ.get("OPENAI_API_KEY") == "sk-test-12345"
3535

36+
# Cleanup to avoid leaking into other tests
37+
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
38+
3639
def test_litellm_caller_raises_on_missing_api_key_env_var(self):
3740
"""LiteLLMCaller should raise ValueError when api_key env var is missing."""
3841
# Create LLM config with missing env var in api_key
@@ -53,7 +56,7 @@ def test_litellm_caller_raises_on_missing_api_key_env_var(self):
5356
with pytest.raises(ValueError, match="Environment variable 'MISSING_OPENAI_KEY' is not set"):
5457
caller._get_model_kwargs("test-model")
5558

56-
def test_litellm_caller_handles_literal_api_key(self):
59+
def test_litellm_caller_handles_literal_api_key(self, monkeypatch):
5760
"""LiteLLMCaller should handle literal api_key values."""
5861
# Create LLM config with literal api_key
5962
llm_config = LLMConfig(
@@ -71,11 +74,14 @@ def test_litellm_caller_handles_literal_api_key(self):
7174

7275
# Get model kwargs - this should work without errors
7376
_ = caller._get_model_kwargs("test-model")
74-
77+
7578
# Verify that the environment variable was set
7679
import os
7780
assert os.environ.get("OPENAI_API_KEY") == "sk-literal-key-12345"
7881

82+
# Cleanup to avoid leaking into other tests
83+
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
84+
7985
def test_litellm_caller_resolves_extra_headers_env_vars(self, monkeypatch):
8086
"""LiteLLMCaller should resolve environment variables in extra_headers."""
8187
monkeypatch.setenv("TEST_REFERER", "https://myapp.com")
@@ -191,6 +197,9 @@ def test_custom_endpoint_with_env_var_api_key(self, monkeypatch):
191197
import os
192198
assert os.environ.get("OPENAI_API_KEY") == "sk-custom-12345"
193199

200+
# Cleanup to avoid leaking into other tests
201+
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
202+
194203
def test_custom_endpoint_with_literal_api_key(self):
195204
"""Custom endpoint should pass api_key in kwargs when using literal value."""
196205
# Create LLM config for custom endpoint with literal api_key
@@ -218,6 +227,90 @@ def test_custom_endpoint_with_literal_api_key(self):
218227
assert "api_base" in model_kwargs
219228
assert model_kwargs["api_base"] == "https://custom-llm.example.com/v1"
220229

230+
def test_openai_env_not_overwritten_if_same_value(self, monkeypatch):
231+
"""OPENAI_API_KEY is left as-is when value matches."""
232+
# Pre-set env to a specific value
233+
monkeypatch.setenv("OPENAI_API_KEY", "sk-openai-same")
234+
235+
llm_config = LLMConfig(
236+
models={
237+
"openai-model": ModelConfig(
238+
model_name="gpt-4",
239+
model_url="https://api.openai.com/v1",
240+
api_key="sk-openai-same",
241+
)
242+
}
243+
)
244+
245+
caller = LiteLLMCaller(llm_config, debug_mode=True)
246+
model_kwargs = caller._get_model_kwargs("openai-model")
247+
248+
import os
249+
# Still should have correct key in kwargs
250+
assert model_kwargs["api_key"] == "sk-openai-same"
251+
# Env var should remain the same
252+
assert os.environ.get("OPENAI_API_KEY") == "sk-openai-same"
253+
254+
def test_openai_env_overwritten_with_warning(self, monkeypatch, caplog):
255+
"""OPENAI_API_KEY overwrite should occur with a warning when value differs."""
256+
monkeypatch.setenv("OPENAI_API_KEY", "sk-openai-original")
257+
258+
llm_config = LLMConfig(
259+
models={
260+
"openai-model": ModelConfig(
261+
model_name="gpt-4",
262+
model_url="https://api.openai.com/v1",
263+
api_key="sk-openai-new",
264+
)
265+
}
266+
)
267+
268+
caller = LiteLLMCaller(llm_config, debug_mode=True)
269+
270+
with caplog.at_level("WARNING"):
271+
model_kwargs = caller._get_model_kwargs("openai-model")
272+
273+
import os
274+
# kwargs should use the new key
275+
assert model_kwargs["api_key"] == "sk-openai-new"
276+
# Env var should be overwritten to the new value
277+
assert os.environ.get("OPENAI_API_KEY") == "sk-openai-new"
278+
# A warning about overwriting should be logged
279+
assert any("Overwriting existing environment variable OPENAI_API_KEY" in rec.getMessage() for rec in caplog.records)
280+
281+
def test_openai_and_custom_models_resolved_in_succession(self, monkeypatch):
282+
"""Sequence of OpenAI then custom endpoint should keep last key in env while kwargs stay correct."""
283+
monkeypatch.setenv("OPENAI_API_KEY", "sk-preexisting")
284+
285+
llm_config = LLMConfig(
286+
models={
287+
"openai-model": ModelConfig(
288+
model_name="gpt-4",
289+
model_url="https://api.openai.com/v1",
290+
api_key="sk-openai-1",
291+
),
292+
"custom-model": ModelConfig(
293+
model_name="custom-model-name",
294+
model_url="https://custom-llm.example.com/v1",
295+
api_key="sk-custom-2",
296+
),
297+
}
298+
)
299+
300+
caller = LiteLLMCaller(llm_config, debug_mode=True)
301+
302+
# First resolve OpenAI model
303+
openai_kwargs = caller._get_model_kwargs("openai-model")
304+
# Then resolve custom model
305+
custom_kwargs = caller._get_model_kwargs("custom-model")
306+
307+
import os
308+
# kwargs should always reflect model-specific keys
309+
assert openai_kwargs["api_key"] == "sk-openai-1"
310+
assert custom_kwargs["api_key"] == "sk-custom-2"
311+
# Env var ends up with the last key used (custom model)
312+
assert os.environ.get("OPENAI_API_KEY") == "sk-custom-2"
313+
221314
def test_custom_endpoint_with_extra_headers(self, monkeypatch):
222315
"""Custom endpoint should handle extra_headers correctly."""
223316
monkeypatch.setenv("CUSTOM_API_KEY", "sk-custom-auth")
@@ -256,7 +349,7 @@ def test_custom_endpoint_with_extra_headers(self, monkeypatch):
256349
# Verify api_base is set
257350
assert "api_base" in model_kwargs
258351

259-
def test_known_providers_still_get_api_key_in_kwargs(self):
352+
def test_known_providers_still_get_api_key_in_kwargs(self, monkeypatch):
260353
"""Verify that known providers also get api_key in kwargs (backward compatibility)."""
261354
# Test OpenAI
262355
llm_config = LLMConfig(
@@ -275,6 +368,9 @@ def test_known_providers_still_get_api_key_in_kwargs(self):
275368
assert "api_key" in model_kwargs
276369
assert model_kwargs["api_key"] == "sk-openai-test"
277370

371+
# cleanup any env var potentially set by implementation
372+
monkeypatch.delenv("OPENAI_API_KEY", raising=False)
373+
278374
# Test OpenRouter
279375
llm_config = LLMConfig(
280376
models={
@@ -291,3 +387,6 @@ def test_known_providers_still_get_api_key_in_kwargs(self):
291387
# OpenRouter should get api_key in kwargs
292388
assert "api_key" in model_kwargs
293389
assert model_kwargs["api_key"] == "sk-or-test"
390+
391+
# cleanup any env var potentially set by implementation
392+
monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)

0 commit comments

Comments (0)