Commit e0150f0
Add an openrouter provider
OpenRouter is a "muxing provider": it provides access to multiple models and providers through a single endpoint. It speaks a dialect of the OpenAI protocol, so for our purposes we can treat it as OpenAI. There are two differences in how the requests are handled, though:

1) We need to know where to forward the request. By default this is `https://openrouter.ai/api/v1`, set through the `base_url` parameter.
2) We need to prefix the model with `openrouter/`. This is a litellm-ism (see https://docs.litellm.ai/docs/providers/openrouter) which we'll be able to remove once we ditch litellm.

Initially I was considering just exposing the OpenAI provider on an additional route and handling the prefix based on the route, but I think having an explicit provider class is better: it allows us to handle any differences in the OpenRouter dialect easily in the future.

Related: #878
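For context, a minimal sketch of the litellm convention this commit relies on; the model name and API key below are placeholders, not part of this change:

    import litellm

    # The "openrouter/" prefix tells litellm to apply its OpenRouter handling,
    # while api_base points the request at the OpenRouter endpoint.
    response = litellm.completion(
        model="openrouter/anthropic/claude-3.5-sonnet",  # placeholder model
        api_base="https://openrouter.ai/api/v1",
        api_key="sk-or-...",  # placeholder OpenRouter key
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)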
1 parent b50b28d commit e0150f0

File tree

7 files changed: +60 -3 lines changed

src/codegate/config.py

+1
@@ -17,6 +17,7 @@
 # Default provider URLs
 DEFAULT_PROVIDER_URLS = {
     "openai": "https://api.openai.com/v1",
+    "openrouter": "https://openrouter.ai/api/v1",
     "anthropic": "https://api.anthropic.com/v1",
     "vllm": "http://localhost:8000",  # Base URL without /v1 path
     "ollama": "http://localhost:11434",  # Default Ollama server URL

src/codegate/providers/__init__.py

+2
@@ -2,13 +2,15 @@
 from codegate.providers.base import BaseProvider
 from codegate.providers.ollama.provider import OllamaProvider
 from codegate.providers.openai.provider import OpenAIProvider
+from codegate.providers.openrouter.provider import OpenRouterProvider
 from codegate.providers.registry import ProviderRegistry
 from codegate.providers.vllm.provider import VLLMProvider

 __all__ = [
     "BaseProvider",
     "ProviderRegistry",
     "OpenAIProvider",
+    "OpenRouterProvider",
     "AnthropicProvider",
     "VLLMProvider",
     "OllamaProvider",
src/codegate/providers/openai/__init__.py

+3

@@ -0,0 +1,3 @@
+from codegate.providers.openai.provider import OpenAIProvider
+
+__all__ = ["OpenAIProvider"]
src/codegate/providers/openrouter/provider.py

+47

@@ -0,0 +1,47 @@
+import json
+
+from fastapi import Header, HTTPException, Request
+
+from codegate.clients.detector import DetectClient
+from codegate.pipeline.factory import PipelineFactory
+from codegate.providers.openai import OpenAIProvider
+
+
+class OpenRouterProvider(OpenAIProvider):
+    def __init__(self, pipeline_factory: PipelineFactory):
+        super().__init__(pipeline_factory)
+
+    @property
+    def provider_route_name(self) -> str:
+        return "openrouter"
+
+    def _setup_routes(self):
+        @self.router.post(f"/{self.provider_route_name}/api/v1/chat/completions")
+        @self.router.post(f"/{self.provider_route_name}/chat/completions")
+        @DetectClient()
+        async def create_completion(
+            request: Request,
+            authorization: str = Header(..., description="Bearer token"),
+        ):
+            if not authorization.startswith("Bearer "):
+                raise HTTPException(status_code=401, detail="Invalid authorization header")
+
+            api_key = authorization.split(" ")[1]
+            body = await request.body()
+            data = json.loads(body)
+
+            base_url = self._get_base_url()
+            data["base_url"] = base_url
+
+            # litellm workaround - add openrouter/ prefix to model name to make it openai-compatible
+            # once we get rid of litellm, this can simply be removed
+            original_model = data.get("model", "")
+            if not original_model.startswith("openrouter/"):
+                data["model"] = f"openrouter/{original_model}"
+
+            return await self.process_request(
+                data,
+                api_key,
+                request.url.path,
+                request.state.detected_client,
+            )
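For reference, a minimal sketch of how a client could exercise the new route; the local codegate address, port, and model name are assumptions, not part of this commit:

    from openai import OpenAI

    # Point an OpenAI-compatible client at codegate's openrouter route; the
    # provider forwards the request (and the Bearer token) to OpenRouter.
    client = OpenAI(
        base_url="http://localhost:8989/openrouter/api/v1",  # assumed local codegate address
        api_key="sk-or-...",  # placeholder OpenRouter key, passed through as the Bearer token
    )

    resp = client.chat.completions.create(
        model="anthropic/claude-3.5-sonnet",  # example model; the provider adds the openrouter/ prefix
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(resp.choices[0].message.content)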

src/codegate/providers/vllm/provider.py

-1
@@ -9,7 +9,6 @@

 from codegate.clients.clients import ClientType
 from codegate.clients.detector import DetectClient
-from codegate.config import Config
 from codegate.pipeline.factory import PipelineFactory
 from codegate.providers.base import BaseProvider, ModelFetchError
 from codegate.providers.litellmshim import LiteLLmShim, sse_stream_generator

src/codegate/server.py

+5
@@ -18,6 +18,7 @@
 from codegate.providers.lm_studio.provider import LmStudioProvider
 from codegate.providers.ollama.provider import OllamaProvider
 from codegate.providers.openai.provider import OpenAIProvider
+from codegate.providers.openrouter.provider import OpenRouterProvider
 from codegate.providers.registry import ProviderRegistry, get_provider_registry
 from codegate.providers.vllm.provider import VLLMProvider

@@ -75,6 +76,10 @@ async def log_user_agent(request: Request, call_next):
         ProviderType.openai,
         OpenAIProvider(pipeline_factory),
     )
+    registry.add_provider(
+        ProviderType.openai,
+        OpenRouterProvider(pipeline_factory),
+    )
     registry.add_provider(
         ProviderType.anthropic,
         AnthropicProvider(

tests/test_server.py

+2 -2
@@ -108,8 +108,8 @@ def test_provider_registration(mock_registry, mock_secrets_mgr, mock_pipeline_fa
     # Verify all providers were registered
     registry_instance = mock_registry.return_value
     assert (
-        registry_instance.add_provider.call_count == 6
-    )  # openai, anthropic, llamacpp, vllm, ollama, lm_studio
+        registry_instance.add_provider.call_count == 7
+    )  # openai, anthropic, llamacpp, vllm, ollama, lm_studio, openrouter

     # Verify specific providers were registered
     provider_names = [call.args[0] for call in registry_instance.add_provider.call_args_list]
