Skip to content

Commit fde49cf

Browse files
committed
Add max_tokens and documentation to model settings
1 parent 96913b8 commit fde49cf

File tree

3 files changed

+22
-0
lines changed

3 files changed

+22
-0
lines changed

src/agents/model_settings.py

+20
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,34 @@ class ModelSettings:
1010
1111
This class holds optional model configuration parameters (e.g. temperature,
1212
top_p, penalties, truncation, etc.).
13+
14+
Not all models/providers support all of these parameters, so please check the API documentation
15+
for the specific model and provider you are using.
1316
"""
1417

1518
temperature: float | None = None
19+
"""The temperature to use when calling the model."""
20+
1621
top_p: float | None = None
22+
"""The top_p to use when calling the model."""
23+
1724
frequency_penalty: float | None = None
25+
"""The frequency penalty to use when calling the model."""
26+
1827
presence_penalty: float | None = None
28+
"""The presence penalty to use when calling the model."""
29+
1930
tool_choice: Literal["auto", "required", "none"] | str | None = None
31+
"""The tool choice to use when calling the model."""
32+
2033
parallel_tool_calls: bool | None = False
34+
"""Whether to use parallel tool calls when calling the model."""
35+
2136
truncation: Literal["auto", "disabled"] | None = None
37+
"""The truncation strategy to use when calling the model."""
38+
39+
max_tokens: int | None = None
40+
"""The maximum number of output tokens to generate."""
2241

2342
def resolve(self, override: ModelSettings | None) -> ModelSettings:
2443
"""Produce a new ModelSettings by overlaying any non-None values from the
@@ -33,4 +52,5 @@ def resolve(self, override: ModelSettings | None) -> ModelSettings:
3352
tool_choice=override.tool_choice or self.tool_choice,
3453
parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
3554
truncation=override.truncation or self.truncation,
55+
max_tokens=override.max_tokens or self.max_tokens,
3656
)

src/agents/models/openai_chatcompletions.py

+1
Original file line numberDiff line numberDiff line change
@@ -503,6 +503,7 @@ async def _fetch_response(
503503
top_p=self._non_null_or_not_given(model_settings.top_p),
504504
frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
505505
presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
506+
max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
506507
tool_choice=tool_choice,
507508
response_format=response_format,
508509
parallel_tool_calls=parallel_tool_calls,

src/agents/models/openai_responses.py

+1
Original file line numberDiff line numberDiff line change
@@ -235,6 +235,7 @@ async def _fetch_response(
235235
temperature=self._non_null_or_not_given(model_settings.temperature),
236236
top_p=self._non_null_or_not_given(model_settings.top_p),
237237
truncation=self._non_null_or_not_given(model_settings.truncation),
238+
max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
238239
tool_choice=tool_choice,
239240
parallel_tool_calls=parallel_tool_calls,
240241
stream=stream,

0 commit comments

Comments (0)