From 3b295e47d7f2b54393338b924de55002be41b12a Mon Sep 17 00:00:00 2001
From: KatHaruto
Date: Fri, 2 May 2025 17:01:58 +0900
Subject: [PATCH 1/3] fix: add ensure_ascii=False to json.dumps for correct Unicode output

---
 src/agents/extensions/models/litellm_model.py | 10 +++++++---
 src/agents/mcp/util.py                        |  5 ++++-
 src/agents/models/chatcmpl_converter.py       |  3 ++-
 src/agents/models/openai_chatcompletions.py   | 11 ++++++++---
 src/agents/models/openai_responses.py         | 10 +++++++---
 5 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index dc672acd..7d49fe00 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -96,7 +96,11 @@ async def get_response(
             logger.debug("Received model response")
         else:
             logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+                f"LLM resp:\n{json.dumps(
+                    response.choices[0].message.model_dump(),
+                    indent=2,
+                    ensure_ascii=False
+                )}\n"
             )
 
         if hasattr(response, "usage"):
@@ -251,8 +255,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling Litellm model: {self.model}\n"
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/mcp/util.py b/src/agents/mcp/util.py
index bbfe1885..95de22cb 100644
--- a/src/agents/mcp/util.py
+++ b/src/agents/mcp/util.py
@@ -116,7 +116,10 @@ async def invoke_mcp_tool(
         if len(result.content) == 1:
             tool_output = result.content[0].model_dump_json()
         elif len(result.content) > 1:
-            tool_output = json.dumps([item.model_dump() for item in result.content])
+            tool_output = json.dumps(
+                [item.model_dump() for item in result.content],
+                ensure_ascii=False
+            )
         else:
             logger.error(f"Errored MCP tool result: {result}")
             tool_output = "Error running tool."
diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py
index 613a3745..0244be5d 100644
--- a/src/agents/models/chatcmpl_converter.py
+++ b/src/agents/models/chatcmpl_converter.py
@@ -393,7 +393,8 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
                         {
                             "queries": file_search.get("queries", []),
                             "status": file_search.get("status"),
-                        }
+                        },
+                        ensure_ascii=False
                     ),
                 },
             )
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 89619f83..06895d54 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -74,7 +74,12 @@ async def get_response(
             logger.debug("Received model response")
         else:
             logger.debug(
-                f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+                "LLM resp:\n"
+                f"{json.dumps(
+                    response.choices[0].message.model_dump(),
+                    indent=2,
+                    ensure_ascii=False
+                )}\n"
             )
 
         usage = (
@@ -222,8 +227,8 @@ async def _fetch_response(
             logger.debug("Calling LLM")
         else:
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index c1ff85b9..f356796b 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -89,7 +89,11 @@ async def get_response(
         else:
             logger.debug(
                 "LLM resp:\n"
-                f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n"
+                f"{json.dumps(
+                    [x.model_dump() for x in response.output],
+                    indent=2,
+                    ensure_ascii=False
+                )}\n"
             )
 
         usage = (
@@ -231,8 +235,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
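For context, json.dumps escapes every non-ASCII character to a \uXXXX sequence by default, which makes logged Japanese (or any non-Latin) text unreadable; ensure_ascii=False emits the characters as-is. A minimal standalone sketch of the behavior this patch targets (the payload is illustrative, not taken from the codebase):

    import json

    payload = {"message": "こんにちは"}  # illustrative non-ASCII payload

    # Default behavior: non-ASCII characters are escaped, so the log is hard to read.
    print(json.dumps(payload, indent=2))
    # {
    #   "message": "\u3053\u3093\u306b\u3061\u306f"
    # }

    # With ensure_ascii=False the original characters survive in the output.
    print(json.dumps(payload, indent=2, ensure_ascii=False))
    # {
    #   "message": "こんにちは"
    # }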
From 979d3c459b5d32cab3bbb7a0a2bd39b74084cab8 Mon Sep 17 00:00:00 2001
From: KatHaruto
Date: Fri, 2 May 2025 18:04:37 +0900
Subject: [PATCH 2/3] fix quotation for older Python versions

---
 src/agents/models/openai_chatcompletions.py | 4 ++--
 src/agents/models/openai_responses.py       | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 06895d54..dbf9d141 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -75,11 +75,11 @@ async def get_response(
         else:
             logger.debug(
                 "LLM resp:\n"
-                f"{json.dumps(
+                f"""{json.dumps(
                     response.choices[0].message.model_dump(),
                     indent=2,
                     ensure_ascii=False
-                )}\n"
+                )}\n"""
             )
 
         usage = (
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index f356796b..a24c1adf 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -89,11 +89,11 @@ async def get_response(
         else:
             logger.debug(
                 "LLM resp:\n"
-                f"{json.dumps(
+                f"""{json.dumps(
                     [x.model_dump() for x in response.output],
                     indent=2,
                     ensure_ascii=False
-                )}\n"
+                )}\n"""
             )
 
         usage = (
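Why the triple quotes in this patch: before Python 3.12 (PEP 701), a single- or double-quoted f-string is a one-line token, so a replacement field cannot span physical lines; triple-quoted f-strings have no such restriction on any Python that supports f-strings (3.6+). A rough sketch of the distinction (the data variable is illustrative):

    import json

    data = {"status": "ok"}  # illustrative stand-in for the logged payload

    # Syntax error before Python 3.12: the replacement field spans lines
    # inside a double-quoted f-string.
    # msg = f"{json.dumps(
    #     data,
    #     indent=2,
    #     ensure_ascii=False
    # )}"

    # Portable: a triple-quoted f-string may contain newlines, so the same
    # multi-line expression parses on older interpreters as well.
    msg = f"""{json.dumps(
        data,
        indent=2,
        ensure_ascii=False
    )}"""
    print(msg)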
From 0542cfce7c89f55b6c26b12cf245460c18074a12 Mon Sep 17 00:00:00 2001
From: KatHaruto
Date: Thu, 8 May 2025 00:48:26 +0900
Subject: [PATCH 3/3] revert ensure_ascii=False where the JSON is not logged

---
 src/agents/mcp/util.py                  | 5 +----
 src/agents/models/chatcmpl_converter.py | 3 +--
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/agents/mcp/util.py b/src/agents/mcp/util.py
index 95de22cb..bbfe1885 100644
--- a/src/agents/mcp/util.py
+++ b/src/agents/mcp/util.py
@@ -116,10 +116,7 @@ async def invoke_mcp_tool(
         if len(result.content) == 1:
             tool_output = result.content[0].model_dump_json()
         elif len(result.content) > 1:
-            tool_output = json.dumps(
-                [item.model_dump() for item in result.content],
-                ensure_ascii=False
-            )
+            tool_output = json.dumps([item.model_dump() for item in result.content])
         else:
             logger.error(f"Errored MCP tool result: {result}")
             tool_output = "Error running tool."
diff --git a/src/agents/models/chatcmpl_converter.py b/src/agents/models/chatcmpl_converter.py
index 0244be5d..613a3745 100644
--- a/src/agents/models/chatcmpl_converter.py
+++ b/src/agents/models/chatcmpl_converter.py
@@ -393,8 +393,7 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
                         {
                             "queries": file_search.get("queries", []),
                             "status": file_search.get("status"),
-                        },
-                        ensure_ascii=False
+                        }
                     ),
                 },
             )
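One plausible rationale for this revert (the commit message does not spell it out): these two call sites produce JSON that is parsed by a machine rather than read by a human, and both escape styles decode to the identical value, so ensure_ascii only matters for the log statements kept in the earlier patches. A minimal sketch with illustrative data:

    import json

    items = [{"text": "日本語"}]  # illustrative MCP tool result content

    escaped = json.dumps(items)                       # '[{"text": "\\u65e5\\u672c\\u8a9e"}]'
    readable = json.dumps(items, ensure_ascii=False)  # '[{"text": "日本語"}]'

    # Both serializations round-trip to the same Python object, so a consumer
    # that parses the JSON sees no difference between them.
    assert json.loads(escaped) == json.loads(readable) == items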