1 parent 4989fc8 commit 8f37e0a
py/core/providers/llm/openai.py
@@ -399,8 +399,13 @@ def _get_base_args(self, generation_config: GenerationConfig) -> dict:
         model_str = generation_config.model or ""
 
-        if any(model_prefix in model_str.lower() for model_prefix in ["o1", "o3", "gpt-5"]):
-            args["max_completion_tokens"] = generation_config.max_tokens_to_sample
+        if any(
+            model_prefix in model_str.lower()
+            for model_prefix in ["o1", "o3", "gpt-5"]
+        ):
+            args["max_completion_tokens"] = (
+                generation_config.max_tokens_to_sample
+            )
         else:
             args["max_tokens"] = generation_config.max_tokens_to_sample