Skip to content

Commit 16cfce1

Browse files
authored
Add examples and documentation for using custom model providers (#110)
2 parents 3ef5f47 + 25a6331 commit 16cfce1

File tree

8 files changed

+247
-34
lines changed

8 files changed

+247
-34
lines changed

docs/models.md

+9-16
Original file line numberDiff line numberDiff line change
@@ -53,21 +53,14 @@ async def main():
5353

5454
## Using other LLM providers
5555

56-
Many providers also support the OpenAI API format, which means you can pass a `base_url` to the existing OpenAI model implementations and use them easily. `ModelSettings` is used to configure tuning parameters (e.g., temperature, top_p) for the model you select.
56+
You can use other LLM providers in 3 ways (examples [here](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/)):
5757

58-
```python
59-
external_client = AsyncOpenAI(
60-
api_key="EXTERNAL_API_KEY",
61-
base_url="https://api.external.com/v1/",
62-
)
58+
1. [`set_default_openai_client`][agents.set_default_openai_client] is useful in cases where you want to globally use an instance of `AsyncOpenAI` as the LLM client. This is for cases where the LLM provider has an OpenAI compatible API endpoint, and you can set the `base_url` and `api_key`. See a configurable example in [examples/model_providers/custom_example_global.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_global.py).
59+
2. [`ModelProvider`][agents.models.interface.ModelProvider] is at the `Runner.run` level. This lets you say "use a custom model provider for all agents in this run". See a configurable example in [examples/model_providers/custom_example_provider.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_provider.py).
60+
3. [`Agent.model`][agents.agent.Agent.model] lets you specify the model on a specific Agent instance. This enables you to mix and match different providers for different agents. See a configurable example in [examples/model_providers/custom_example_agent.py](https://github.com/openai/openai-agents-python/tree/main/examples/model_providers/custom_example_agent.py).
6361

64-
spanish_agent = Agent(
65-
name="Spanish agent",
66-
instructions="You only speak Spanish.",
67-
model=OpenAIChatCompletionsModel(
68-
model="EXTERNAL_MODEL_NAME",
69-
openai_client=external_client,
70-
),
71-
model_settings=ModelSettings(temperature=0.5),
72-
)
73-
```
62+
In cases where you do not have an API key from `platform.openai.com`, we recommend disabling tracing via `set_tracing_disabled()`, or setting up a [different tracing processor](tracing.md).
63+
64+
!!! note
65+
66+
In these examples, we use the Chat Completions API/model, because most LLM providers don't yet support the Responses API. If your LLM provider does support it, we recommend using Responses.

examples/model_providers/README.md

+19
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
# Custom LLM providers
2+
3+
The examples in this directory demonstrate how you might use a non-OpenAI LLM provider. To run them, first set a base URL, API key and model.
4+
5+
```bash
6+
export EXAMPLE_BASE_URL="..."
7+
export EXAMPLE_API_KEY="..."
8+
export EXAMPLE_MODEL_NAME="..."
9+
```
10+
11+
Then run the examples, e.g.:
12+
13+
```
14+
python examples/model_providers/custom_example_provider.py
15+
16+
Loops within themselves,
17+
Function calls its own being,
18+
Depth without ending.
19+
```
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
import asyncio
import os

from openai import AsyncOpenAI

from agents import Agent, OpenAIChatCompletionsModel, Runner, set_tracing_disabled

# Connection details for the custom (OpenAI-compatible) provider, taken from the environment.
BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""

# Fail fast with a clear message when any of the three settings is missing.
if not (BASE_URL and API_KEY and MODEL_NAME):
    raise ValueError(
        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
    )

"""This example uses a custom provider for a specific agent. Steps:
1. Create a custom OpenAI client.
2. Create a `Model` that uses the custom client.
3. Set the `model` on the Agent.

Note that in this example, we disable tracing under the assumption that you don't have an API key
from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
or call set_tracing_export_api_key() to set a tracing specific key.
"""
client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)
set_tracing_disabled(disabled=True)

# An alternate approach that would also work:
# PROVIDER = OpenAIProvider(openai_client=client)
# agent = Agent(..., model="some-custom-model")
# Runner.run(agent, ..., run_config=RunConfig(model_provider=PROVIDER))


async def main():
    # This agent is pinned to the custom LLM provider via its `model` attribute.
    haiku_agent = Agent(
        name="Assistant",
        instructions="You only respond in haikus.",
        model=OpenAIChatCompletionsModel(model=MODEL_NAME, openai_client=client),
    )

    response = await Runner.run(haiku_agent, "Tell me about recursion in programming.")
    print(response.final_output)


if __name__ == "__main__":
    asyncio.run(main())
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
import asyncio
import os

from openai import AsyncOpenAI

from agents import (
    Agent,
    Runner,
    set_default_openai_api,
    set_default_openai_client,
    set_tracing_disabled,
)

# Connection details for the custom (OpenAI-compatible) provider, taken from the environment.
BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""

# Fail fast with a clear message when any of the three settings is missing.
if not (BASE_URL and API_KEY and MODEL_NAME):
    raise ValueError(
        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
    )


"""This example uses a custom provider for all requests by default. We do three things:
1. Create a custom client.
2. Set it as the default OpenAI client, and don't use it for tracing.
3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API.

Note that in this example, we disable tracing under the assumption that you don't have an API key
from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
or call set_tracing_export_api_key() to set a tracing specific key.
"""

client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)
# Make the custom client the process-wide default, but keep it out of tracing.
set_default_openai_client(client=client, use_for_tracing=False)
set_default_openai_api("chat_completions")
set_tracing_disabled(disabled=True)


async def main():
    # No `model=` Model object needed: the global default client handles every request.
    assistant = Agent(
        name="Assistant",
        instructions="You only respond in haikus.",
        model=MODEL_NAME,
    )

    response = await Runner.run(assistant, "Tell me about recursion in programming.")
    print(response.final_output)


if __name__ == "__main__":
    asyncio.run(main())
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
from __future__ import annotations
2+
3+
import asyncio
4+
import os
5+
6+
from openai import AsyncOpenAI
7+
8+
from agents import (
9+
Agent,
10+
Model,
11+
ModelProvider,
12+
OpenAIChatCompletionsModel,
13+
RunConfig,
14+
Runner,
15+
set_tracing_disabled,
16+
)
17+
18+
BASE_URL = os.getenv("EXAMPLE_BASE_URL") or ""
19+
API_KEY = os.getenv("EXAMPLE_API_KEY") or ""
20+
MODEL_NAME = os.getenv("EXAMPLE_MODEL_NAME") or ""
21+
22+
if not BASE_URL or not API_KEY or not MODEL_NAME:
23+
raise ValueError(
24+
"Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
25+
)
26+
27+
28+
"""This example uses a custom provider for some calls to Runner.run(), and direct calls to OpenAI for
29+
others. Steps:
30+
1. Create a custom OpenAI client.
31+
2. Create a ModelProvider that uses the custom client.
32+
3. Use the ModelProvider in calls to Runner.run(), only when we want to use the custom LLM provider.
33+
34+
Note that in this example, we disable tracing under the assumption that you don't have an API key
35+
from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var
36+
or call set_tracing_export_api_key() to set a tracing specific key.
37+
"""
38+
client = AsyncOpenAI(base_url=BASE_URL, api_key=API_KEY)
39+
set_tracing_disabled(disabled=True)
40+
41+
42+
class CustomModelProvider(ModelProvider):
43+
def get_model(self, model_name: str | None) -> Model:
44+
return OpenAIChatCompletionsModel(model=model_name or MODEL_NAME, openai_client=client)
45+
46+
47+
CUSTOM_MODEL_PROVIDER = CustomModelProvider()
48+
49+
50+
async def main():
51+
agent = Agent(
52+
name="Assistant",
53+
instructions="You only respond in haikus.",
54+
)
55+
56+
# This will use the custom model provider
57+
result = await Runner.run(
58+
agent,
59+
"Tell me about recursion in programming.",
60+
run_config=RunConfig(model_provider=CUSTOM_MODEL_PROVIDER),
61+
)
62+
print(result.final_output)
63+
64+
# If you uncomment this, it will use OpenAI directly, not the custom provider
65+
# result = await Runner.run(
66+
# agent,
67+
# "Tell me about recursion in programming.",
68+
# )
69+
# print(result.final_output)
70+
71+
72+
if __name__ == "__main__":
73+
asyncio.run(main())

src/agents/__init__.py

+10-4
Original file line numberDiff line numberDiff line change
@@ -92,13 +92,19 @@
9292
from .usage import Usage
9393

9494

95-
def set_default_openai_key(key: str) -> None:
96-
"""Set the default OpenAI API key to use for LLM requests and tracing. This is only necessary if
97-
the OPENAI_API_KEY environment variable is not already set.
95+
def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
96+
"""Set the default OpenAI API key to use for LLM requests (and optionally tracing). This is
97+
only necessary if the OPENAI_API_KEY environment variable is not already set.
9898
9999
If provided, this key will be used instead of the OPENAI_API_KEY environment variable.
100+
101+
Args:
102+
key: The OpenAI key to use.
103+
use_for_tracing: Whether to also use this key to send traces to OpenAI. Defaults to True.
104+
If False, you'll either need to set the OPENAI_API_KEY environment variable or call
105+
set_tracing_export_api_key() with the API key you want to use for tracing.
100106
"""
101-
_config.set_default_openai_key(key)
107+
_config.set_default_openai_key(key, use_for_tracing)
102108

103109

104110
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None:

src/agents/_config.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -5,15 +5,18 @@
55
from .tracing import set_tracing_export_api_key
66

77

8-
def set_default_openai_key(key: str) -> None:
9-
set_tracing_export_api_key(key)
8+
def set_default_openai_key(key: str, use_for_tracing: bool) -> None:
109
_openai_shared.set_default_openai_key(key)
1110

11+
if use_for_tracing:
12+
set_tracing_export_api_key(key)
13+
1214

1315
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:
16+
_openai_shared.set_default_openai_client(client)
17+
1418
if use_for_tracing:
1519
set_tracing_export_api_key(client.api_key)
16-
_openai_shared.set_default_openai_client(client)
1720

1821

1922
def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None:

src/agents/models/openai_provider.py

+24-11
Original file line numberDiff line numberDiff line change
@@ -38,28 +38,41 @@ def __init__(
3838
assert api_key is None and base_url is None, (
3939
"Don't provide api_key or base_url if you provide openai_client"
4040
)
41-
self._client = openai_client
41+
self._client: AsyncOpenAI | None = openai_client
4242
else:
43-
self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
44-
api_key=api_key or _openai_shared.get_default_openai_key(),
45-
base_url=base_url,
46-
organization=organization,
47-
project=project,
48-
http_client=shared_http_client(),
49-
)
43+
self._client = None
44+
self._stored_api_key = api_key
45+
self._stored_base_url = base_url
46+
self._stored_organization = organization
47+
self._stored_project = project
5048

51-
self._is_openai_model = self._client.base_url.host.startswith("api.openai.com")
5249
if use_responses is not None:
5350
self._use_responses = use_responses
5451
else:
5552
self._use_responses = _openai_shared.get_use_responses_by_default()
5653

54+
# We lazy load the client in case you never actually use OpenAIProvider(). Otherwise
55+
# AsyncOpenAI() raises an error if you don't have an API key set.
56+
def _get_client(self) -> AsyncOpenAI:
57+
if self._client is None:
58+
self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
59+
api_key=self._stored_api_key or _openai_shared.get_default_openai_key(),
60+
base_url=self._stored_base_url,
61+
organization=self._stored_organization,
62+
project=self._stored_project,
63+
http_client=shared_http_client(),
64+
)
65+
66+
return self._client
67+
5768
def get_model(self, model_name: str | None) -> Model:
5869
if model_name is None:
5970
model_name = DEFAULT_MODEL
6071

72+
client = self._get_client()
73+
6174
return (
62-
OpenAIResponsesModel(model=model_name, openai_client=self._client)
75+
OpenAIResponsesModel(model=model_name, openai_client=client)
6376
if self._use_responses
64-
else OpenAIChatCompletionsModel(model=model_name, openai_client=self._client)
77+
else OpenAIChatCompletionsModel(model=model_name, openai_client=client)
6578
)

0 commit comments

Comments
 (0)