diff --git a/examples/basic/lifecycle_example.py b/examples/basic/lifecycle_example.py
index 9b365106..285bfecd 100644
--- a/examples/basic/lifecycle_example.py
+++ b/examples/basic/lifecycle_example.py
@@ -79,7 +79,7 @@ class FinalResult(BaseModel):
 start_agent = Agent(
     name="Start Agent",
-    instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multipler agent.",
+    instructions="Generate a random number. If it's even, stop. If it's odd, hand off to the multiplier agent.",
     tools=[random_number],
     output_type=FinalResult,
     handoffs=[multiply_agent],
diff --git a/examples/research_bot/agents/search_agent.py b/examples/research_bot/agents/search_agent.py
index 72cbc8e1..f69cfda8 100644
--- a/examples/research_bot/agents/search_agent.py
+++ b/examples/research_bot/agents/search_agent.py
@@ -4,7 +4,7 @@
 INSTRUCTIONS = (
     "You are a research assistant. Given a search term, you search the web for that term and"
     "produce a concise summary of the results. The summary must 2-3 paragraphs and less than 300"
-    "words. Capture the main points. Write succintly, no need to have complete sentences or good"
+    "words. Capture the main points. Write succinctly, no need to have complete sentences or good"
     "grammar. This will be consumed by someone synthesizing a report, so its vital you capture the"
     "essence and ignore any fluff. Do not include any additional commentary other than the summary"
     "itself."
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 7fe981e8..8c649813 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -757,7 +757,7 @@ def extract_all_content(
             elif isinstance(c, dict) and c.get("type") == "input_file":
                 raise UserError(f"File uploads are not supported for chat completions {c}")
             else:
-                raise UserError(f"Unknonw content: {c}")
+                raise UserError(f"Unknown content: {c}")
         return out
 
     @classmethod
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 78765ecb..3eea39ce 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -83,7 +83,7 @@ async def get_response(
                 )
 
                 if _debug.DONT_LOG_MODEL_DATA:
-                    logger.debug("LLM responsed")
+                    logger.debug("LLM responded")
                 else:
                     logger.debug(
                         "LLM resp:\n"
diff --git a/tests/test_agent_runner_streamed.py b/tests/test_agent_runner_streamed.py
index 4c7c7efd..87a76a70 100644
--- a/tests/test_agent_runner_streamed.py
+++ b/tests/test_agent_runner_streamed.py
@@ -674,7 +674,7 @@ async def test_streaming_events():
     total_expected_item_count = sum(expected_item_type_map.values())
 
     assert event_counts["run_item_stream_event"] == total_expected_item_count, (
-        f"Expectd {total_expected_item_count} events, got {event_counts['run_item_stream_event']}"
+        f"Expected {total_expected_item_count} events, got {event_counts['run_item_stream_event']}"
         f"Expected events were: {expected_item_type_map}, got {event_counts}"
     )
 
diff --git a/tests/test_global_hooks.py b/tests/test_global_hooks.py
index 6ac35b90..45854410 100644
--- a/tests/test_global_hooks.py
+++ b/tests/test_global_hooks.py
@@ -223,7 +223,7 @@ class Foo(TypedDict):
 
 
 @pytest.mark.asyncio
-async def test_structed_output_non_streamed_agent_hooks():
+async def test_structured_output_non_streamed_agent_hooks():
     hooks = RunHooksForTests()
     model = FakeModel()
     agent_1 = Agent(name="test_1", model=model)
@@ -296,7 +296,7 @@
 
 
 @pytest.mark.asyncio
-async def test_structed_output_streamed_agent_hooks():
+async def test_structured_output_streamed_agent_hooks():
     hooks = RunHooksForTests()
     model = FakeModel()
     agent_1 = Agent(name="test_1", model=model)