
Commit f8655c3

Merge branch 'main' into main

2 parents: 6010bd4 + 1670d40

14 files changed, +116 -10 lines changed
+18

@@ -0,0 +1,18 @@
+### Summary
+
+<!-- Please give a short summary of the change and the problem this solves. -->
+
+### Test plan
+
+<!-- Please explain how this was tested -->
+
+### Issue number
+
+<!-- For example: "Closes #1234" -->
+
+### Checks
+
+- [ ] I've added new tests (if relevant)
+- [ ] I've added/updated the relevant documentation
+- [ ] I've run `make lint` and `make format`
+- [ ] I've made sure tests pass

Diff for: README.md

+1-1
@@ -140,7 +140,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
 
 ## Tracing
 
-The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), and [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
+The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
 
 ## Development (only needed if you need to edit the SDK/examples)

Diff for: docs/agents.md

+2-1
@@ -13,14 +13,15 @@ The most common properties of an agent you'll configure are:
 ```python
 from agents import Agent, ModelSettings, function_tool
 
+@function_tool
 def get_weather(city: str) -> str:
     return f"The weather in {city} is sunny"
 
 agent = Agent(
     name="Haiku agent",
     instructions="Always respond in haiku form",
     model="o3-mini",
-    tools=[function_tool(get_weather)],
+    tools=[get_weather],
 )
 ```
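
As a reference for the pattern this hunk settles on, here is a short, self-contained sketch (assuming the `agents` package from this repo is installed and an OpenAI API key is configured; the prompt is made up): a function decorated with `@function_tool` is passed to `tools` directly rather than being wrapped again in `function_tool(...)`.

```python
import asyncio

from agents import Agent, Runner, function_tool


@function_tool
def get_weather(city: str) -> str:
    # The decorator turns this plain function into a tool the agent can call.
    return f"The weather in {city} is sunny"


agent = Agent(
    name="Haiku agent",
    instructions="Always respond in haiku form",
    model="o3-mini",
    tools=[get_weather],  # pass the decorated function itself, not function_tool(get_weather)
)


async def main() -> None:
    result = await Runner.run(agent, "What's the weather in Tokyo?")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())
```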

Diff for: docs/context.md

+2-1
@@ -36,6 +36,7 @@ class UserInfo: # (1)!
     name: str
     uid: int
 
+@function_tool
 async def fetch_user_age(wrapper: RunContextWrapper[UserInfo]) -> str: # (2)!
     return f"User {wrapper.context.name} is 47 years old"
 
@@ -44,7 +45,7 @@ async def main():
 
     agent = Agent[UserInfo]( # (4)!
         name="Assistant",
-        tools=[function_tool(fetch_user_age)],
+        tools=[fetch_user_age],
     )
 
     result = await Runner.run(

Diff for: docs/running_agents.md

+1-1
@@ -78,7 +78,7 @@ async def main():
     # San Francisco
 
     # Second turn
-    new_input = output.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
+    new_input = result.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
     result = await Runner.run(agent, new_input)
     print(result.final_output)
     # California
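
For context, here is a runnable version of the multi-turn pattern this hunk fixes (a sketch under the same assumptions as the docs: the `agents` package is installed and credentials are configured). The second turn is built from the first turn's `result`, not from an undefined `output` variable.

```python
import asyncio

from agents import Agent, Runner

agent = Agent(name="Assistant", instructions="Reply very concisely.")


async def main() -> None:
    # First turn
    result = await Runner.run(agent, "What city is the Golden Gate Bridge in?")
    print(result.final_output)
    # San Francisco

    # Second turn: feed the first run's items back in as the new input
    new_input = result.to_input_list() + [{"role": "user", "content": "What state is it in?"}]
    result = await Runner.run(agent, new_input)
    print(result.final_output)
    # California


asyncio.run(main())
```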

Diff for: docs/tracing.md

+2-1
@@ -50,7 +50,7 @@ async def main():
 
     with trace("Joke workflow"): # (1)!
         first_result = await Runner.run(agent, "Tell me a joke")
-        second_result = await Runner.run(agent, f"Rate this joke: {first_output.final_output}")
+        second_result = await Runner.run(agent, f"Rate this joke: {first_result.final_output}")
         print(f"Joke: {first_result.final_output}")
         print(f"Rating: {second_result.final_output}")
 ```
@@ -94,3 +94,4 @@ External trace processors include:
 - [Pydantic Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents)
 - [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk)
 - [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration))
+- [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent)
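
For reference, a self-contained version of the snippet this hunk corrects (a sketch; it assumes `trace` is importable from the `agents` package, as used in docs/tracing.md). Both runs are grouped under one workflow trace, and the second run rates the first run's `final_output`.

```python
import asyncio

from agents import Agent, Runner, trace

agent = Agent(name="Joke generator", instructions="Tell funny jokes.")


async def main() -> None:
    with trace("Joke workflow"):
        # Both runs below are recorded under the single "Joke workflow" trace.
        first_result = await Runner.run(agent, "Tell me a joke")
        second_result = await Runner.run(agent, f"Rate this joke: {first_result.final_output}")
        print(f"Joke: {first_result.final_output}")
        print(f"Rating: {second_result.final_output}")


asyncio.run(main())
```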

Diff for: examples/agent_patterns/input_guardrails.py

+1-1
@@ -53,7 +53,7 @@ async def math_guardrail(
 
     return GuardrailFunctionOutput(
         output_info=final_output,
-        tripwire_triggered=not final_output.is_math_homework,
+        tripwire_triggered=final_output.is_math_homework,
     )
 

Diff for: src/agents/guardrail.py

+1-1
@@ -86,7 +86,7 @@ class InputGuardrail(Generic[TContext]):
         [RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]],
         MaybeAwaitable[GuardrailFunctionOutput],
     ]
-    """A function that receives the the agent input and the context, and returns a
+    """A function that receives the agent input and the context, and returns a
     `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally
     include information about the guardrail's output.
     """

Diff for: src/agents/model_settings.py

+20
@@ -10,15 +10,34 @@ class ModelSettings:
 
     This class holds optional model configuration parameters (e.g. temperature,
     top_p, penalties, truncation, etc.).
+
+    Not all models/providers support all of these parameters, so please check the API documentation
+    for the specific model and provider you are using.
     """
 
     temperature: float | None = None
+    """The temperature to use when calling the model."""
+
     top_p: float | None = None
+    """The top_p to use when calling the model."""
+
     frequency_penalty: float | None = None
+    """The frequency penalty to use when calling the model."""
+
     presence_penalty: float | None = None
+    """The presence penalty to use when calling the model."""
+
     tool_choice: Literal["auto", "required", "none"] | str | None = None
+    """The tool choice to use when calling the model."""
+
     parallel_tool_calls: bool | None = False
+    """Whether to use parallel tool calls when calling the model."""
+
     truncation: Literal["auto", "disabled"] | None = None
+    """The truncation strategy to use when calling the model."""
+
+    max_tokens: int | None = None
+    """The maximum number of output tokens to generate."""
 
     def resolve(self, override: ModelSettings | None) -> ModelSettings:
         """Produce a new ModelSettings by overlaying any non-None values from the
@@ -33,4 +52,5 @@ def resolve(self, override: ModelSettings | None) -> ModelSettings:
             tool_choice=override.tool_choice or self.tool_choice,
             parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
             truncation=override.truncation or self.truncation,
+            max_tokens=override.max_tokens or self.max_tokens,
         )
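
To illustrate the overlay semantics of `resolve()` with the newly added `max_tokens` field, here is a small sketch (not part of the diff; `ModelSettings` is imported from `agents` as in docs/agents.md):

```python
from agents import ModelSettings

base = ModelSettings(temperature=0.3, max_tokens=512)
override = ModelSettings(max_tokens=1024)  # only max_tokens is overridden

resolved = base.resolve(override)
print(resolved.temperature, resolved.max_tokens)  # 0.3 1024 -- non-None override values win

# Caveat visible in the hunk above: resolve() combines fields with `or`, so a
# falsy-but-set override (e.g. temperature=0.0 or parallel_tool_calls=False)
# falls back to the base value instead of overriding it.
```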

Diff for: src/agents/models/openai_chatcompletions.py

+27-1
@@ -51,8 +51,10 @@
     ResponseOutputText,
     ResponseRefusalDeltaEvent,
     ResponseTextDeltaEvent,
+    ResponseUsage,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+from openai.types.responses.response_usage import OutputTokensDetails
 
 from .. import _debug
 from ..agent_output import AgentOutputSchema
@@ -405,7 +407,23 @@ async def stream_response(
         for function_call in state.function_calls.values():
             outputs.append(function_call)
 
-        final_response = response.model_copy(update={"output": outputs, "usage": usage})
+        final_response = response.model_copy()
+        final_response.output = outputs
+        final_response.usage = (
+            ResponseUsage(
+                input_tokens=usage.prompt_tokens,
+                output_tokens=usage.completion_tokens,
+                total_tokens=usage.total_tokens,
+                output_tokens_details=OutputTokensDetails(
+                    reasoning_tokens=usage.completion_tokens_details.reasoning_tokens
+                    if usage.completion_tokens_details
+                    and usage.completion_tokens_details.reasoning_tokens
+                    else 0
+                ),
+            )
+            if usage
+            else None
+        )
 
         yield ResponseCompletedEvent(
             response=final_response,
@@ -503,6 +521,7 @@ async def _fetch_response(
             top_p=self._non_null_or_not_given(model_settings.top_p),
             frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
             presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
@@ -808,6 +827,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
                         "content": cls.extract_text_content(content),
                     }
                     result.append(msg_developer)
+                elif role == "assistant":
+                    flush_assistant_message()
+                    msg_assistant: ChatCompletionAssistantMessageParam = {
+                        "role": "assistant",
+                        "content": cls.extract_text_content(content),
+                    }
+                    result.append(msg_assistant)
                 else:
                     raise UserError(f"Unexpected role in easy_input_message: {role}")

Diff for: src/agents/models/openai_responses.py

+1
@@ -235,6 +235,7 @@ async def _fetch_response(
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
+            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,

Diff for: src/agents/result.py

-2
@@ -216,5 +216,3 @@ def _cleanup_tasks(self):
 
         if self._output_guardrails_task and not self._output_guardrails_task.done():
             self._output_guardrails_task.cancel()
-            self._output_guardrails_task.cancel()
-            self._output_guardrails_task.cancel()

Diff for: tests/test_openai_chatcompletions_converter.py

+35
@@ -393,3 +393,38 @@ def test_unknown_object_errors():
     with pytest.raises(UserError, match="Unhandled item type or structure"):
         # Purposely ignore the type error
         _Converter.items_to_messages([TestObject()])  # type: ignore
+
+
+def test_assistant_messages_in_history():
+    """
+    Test that assistant messages are added to the history.
+    """
+    messages = _Converter.items_to_messages(
+        [
+            {
+                "role": "user",
+                "content": "Hello",
+            },
+            {
+                "role": "assistant",
+                "content": "Hello?",
+            },
+            {
+                "role": "user",
+                "content": "What was my Name?",
+            },
+        ]
+    )
+
+    assert messages == [
+        {"role": "user", "content": "Hello"},
+        {"role": "assistant", "content": "Hello?"},
+        {"role": "user", "content": "What was my Name?"},
+    ]
+    assert len(messages) == 3
+    assert messages[0]["role"] == "user"
+    assert messages[0]["content"] == "Hello"
+    assert messages[1]["role"] == "assistant"
+    assert messages[1]["content"] == "Hello?"
+    assert messages[2]["role"] == "user"
+    assert messages[2]["content"] == "What was my Name?"

Diff for: tests/test_openai_chatcompletions_stream.py

+5
@@ -107,6 +107,11 @@ async def patched_fetch_response(self, *args, **kwargs):
     assert isinstance(completed_resp.output[0].content[0], ResponseOutputText)
     assert completed_resp.output[0].content[0].text == "Hello"
 
+    assert completed_resp.usage, "usage should not be None"
+    assert completed_resp.usage.input_tokens == 7
+    assert completed_resp.usage.output_tokens == 5
+    assert completed_resp.usage.total_tokens == 12
+
 
 @pytest.mark.allow_call_model_methods
 @pytest.mark.asyncio
