Skip to content

Commit 0c87441

Browse files
test(test_anthropic_completion.py): add test ensuring anthropic structured output response is consistent
Resolves #8291
1 parent f651d51 commit 0c87441

File tree

2 files changed

+66
-4
lines changed

2 files changed

+66
-4
lines changed

litellm/model_prices_and_context_window_backup.json

+15
Original file line numberDiff line numberDiff line change
@@ -1069,6 +1069,21 @@
10691069
"supports_prompt_caching": true,
10701070
"supports_tool_choice": true
10711071
},
1072+
"azure/o1-2024-12-17": {
1073+
"max_tokens": 100000,
1074+
"max_input_tokens": 200000,
1075+
"max_output_tokens": 100000,
1076+
"input_cost_per_token": 0.000015,
1077+
"output_cost_per_token": 0.000060,
1078+
"cache_read_input_token_cost": 0.0000075,
1079+
"litellm_provider": "azure",
1080+
"mode": "chat",
1081+
"supports_function_calling": true,
1082+
"supports_parallel_function_calling": true,
1083+
"supports_vision": true,
1084+
"supports_prompt_caching": true,
1085+
"supports_tool_choice": true
1086+
},
10721087
"azure/o1-preview": {
10731088
"max_tokens": 32768,
10741089
"max_input_tokens": 128000,

tests/llm_translation/test_anthropic_completion.py

+51-4
Original file line numberDiff line numberDiff line change
@@ -1022,10 +1022,26 @@ def test_anthropic_json_mode_and_tool_call_response(
10221022
[
10231023
("stop", ["stop"], True), # basic string
10241024
(["stop1", "stop2"], ["stop1", "stop2"], True), # list of strings
1025-
(" ", None, True), # whitespace string should be dropped when drop_params is True
1026-
(" ", [" "], False), # whitespace string should be kept when drop_params is False
1027-
(["stop1", " ", "stop2"], ["stop1", "stop2"], True), # list with whitespace that should be filtered
1028-
(["stop1", " ", "stop2"], ["stop1", " ", "stop2"], False), # list with whitespace that should be kept
1025+
(
1026+
" ",
1027+
None,
1028+
True,
1029+
), # whitespace string should be dropped when drop_params is True
1030+
(
1031+
" ",
1032+
[" "],
1033+
False,
1034+
), # whitespace string should be kept when drop_params is False
1035+
(
1036+
["stop1", " ", "stop2"],
1037+
["stop1", "stop2"],
1038+
True,
1039+
), # list with whitespace that should be filtered
1040+
(
1041+
["stop1", " ", "stop2"],
1042+
["stop1", " ", "stop2"],
1043+
False,
1044+
), # list with whitespace that should be kept
10291045
(None, None, True), # None input
10301046
],
10311047
)
@@ -1035,3 +1051,34 @@ def test_map_stop_sequences(stop_input, expected_output, drop_params):
10351051
config = AnthropicConfig()
10361052
result = config._map_stop_sequences(stop_input)
10371053
assert result == expected_output
1054+
1055+
1056+
@pytest.mark.asyncio
async def test_anthropic_structured_output():
    """
    Ensure the Anthropic structured-output response is consistent.

    The request sets ``response_format={"type": "json_object"}``, so the
    returned message content must be parseable JSON — asserting only that
    the response object exists would pass even if structured output broke.

    Relevant Issue: https://github.com/BerriAI/litellm/issues/8291
    """
    import json

    from litellm import acompletion

    args = {
        "model": "claude-3-5-sonnet-20240620",
        # Fixed seed/temperature=0 to keep the live response as deterministic
        # as the provider allows.
        "seed": 3015206306868917280,
        "stop": None,
        "messages": [
            {
                "role": "system",
                "content": 'You are a hello world agent.\nAlways respond in the following valid JSON format: {\n "response": "response",\n}\n',
            },
            {"role": "user", "content": "Respond with hello world"},
        ],
        "temperature": 0,
        "response_format": {"type": "json_object"},
        "drop_params": True,
    }

    response = await acompletion(**args)
    assert response is not None
    print(response)

    # The actual consistency check: structured output must always be
    # valid JSON. json.loads raises ValueError/JSONDecodeError otherwise.
    content = response.choices[0].message.content
    assert content is not None
    json.loads(content)

0 commit comments

Comments (0)