Skip to content

Commit 1670d40

Browse files
Authored commit
Merge pull request #112 from openai/pr112
Fix streaming in chat completions
2 parents 5626cb1 + 2302b47 commit 1670d40

File tree

2 files changed

+24
-1
lines changed

2 files changed

+24
-1
lines changed

src/agents/models/openai_chatcompletions.py

+19-1
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,10 @@
5151
ResponseOutputText,
5252
ResponseRefusalDeltaEvent,
5353
ResponseTextDeltaEvent,
54+
ResponseUsage,
5455
)
5556
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
57+
from openai.types.responses.response_usage import OutputTokensDetails
5658

5759
from .. import _debug
5860
from ..agent_output import AgentOutputSchema
@@ -405,7 +407,23 @@ async def stream_response(
405407
for function_call in state.function_calls.values():
406408
outputs.append(function_call)
407409

408-
final_response = response.model_copy(update={"output": outputs, "usage": usage})
410+
final_response = response.model_copy()
411+
final_response.output = outputs
412+
final_response.usage = (
413+
ResponseUsage(
414+
input_tokens=usage.prompt_tokens,
415+
output_tokens=usage.completion_tokens,
416+
total_tokens=usage.total_tokens,
417+
output_tokens_details=OutputTokensDetails(
418+
reasoning_tokens=usage.completion_tokens_details.reasoning_tokens
419+
if usage.completion_tokens_details
420+
and usage.completion_tokens_details.reasoning_tokens
421+
else 0
422+
),
423+
)
424+
if usage
425+
else None
426+
)
409427

410428
yield ResponseCompletedEvent(
411429
response=final_response,

tests/test_openai_chatcompletions_stream.py

+5
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,11 @@ async def patched_fetch_response(self, *args, **kwargs):
107107
assert isinstance(completed_resp.output[0].content[0], ResponseOutputText)
108108
assert completed_resp.output[0].content[0].text == "Hello"
109109

110+
assert completed_resp.usage, "usage should not be None"
111+
assert completed_resp.usage.input_tokens == 7
112+
assert completed_resp.usage.output_tokens == 5
113+
assert completed_resp.usage.total_tokens == 12
114+
110115

111116
@pytest.mark.allow_call_model_methods
112117
@pytest.mark.asyncio

0 commit comments

Comments (0)