@@ -9,7 +9,8 @@

from ddtrace.internal.utils.version import parse_version
from tests.contrib.langchain.utils import get_request_vcr
-# from tests.utils import flaky
+
+from tests.utils import flaky


LANGCHAIN_VERSION = parse_version(langchain.__version__)
@@ -23,7 +24,6 @@
    "meta.langchain.request.openai.parameters.logprobs",
    "meta.langchain.request.openai.parameters.seed",  # langchain-openai llm call now includes seed as param
    "meta.langchain.request.openai.parameters.logprobs",  # langchain-openai llm call now includes logprobs as param
-    "metrics.langchain.tokens.total_cost",  # total_cost depends on whether tiktoken is installed
    # these are sometimes named differently
    "meta.langchain.request.openai.parameters.max_tokens",
    "meta.langchain.request.openai.parameters.max_completion_tokens",
@@ -37,15 +37,15 @@ def request_vcr():
    yield get_request_vcr()


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_llm_sync(langchain_openai, request_vcr):
    llm = langchain_openai.OpenAI()
    with request_vcr.use_cassette("openai_completion_sync.yaml"):
        llm.invoke("Can you explain what Descartes meant by 'I think, therefore I am'?")


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_llm_sync_multiple_prompts(langchain_openai, request_vcr):
    llm = langchain_openai.OpenAI()
@@ -58,7 +58,7 @@ def test_openai_llm_sync_multiple_prompts(langchain_openai, request_vcr):
        )


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.asyncio
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
async def test_openai_llm_async(langchain_openai, request_vcr):
@@ -67,7 +67,7 @@ async def test_openai_llm_async(langchain_openai, request_vcr):
        await llm.agenerate(["Which team won the 2019 NBA finals?"])


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_llm_error(langchain, langchain_openai, request_vcr):
    import openai  # Imported here because the os env OPENAI_API_KEY needs to be set via langchain fixture before import
@@ -83,7 +83,7 @@ def test_openai_llm_error(langchain, langchain_openai, request_vcr):
            llm.generate([12345, 123456])


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.skipif(LANGCHAIN_VERSION < (0, 2), reason="Requires separate cassette for langchain v0.1")
@pytest.mark.snapshot
def test_cohere_llm_sync(langchain_cohere, request_vcr):
@@ -92,7 +92,7 @@ def test_cohere_llm_sync(langchain_cohere, request_vcr):
        llm.invoke("What is the secret Krabby Patty recipe?")


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.skipif(
    LANGCHAIN_VERSION < (0, 2) or sys.version_info < (3, 10),
    reason="Requires separate cassette for langchain v0.1, Python 3.9",
@@ -106,15 +106,15 @@ def test_ai21_llm_sync(langchain_community, request_vcr):
        llm.invoke("Why does everyone in Bikini Bottom hate Plankton?")


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_chat_model_sync_call_langchain_openai(langchain_openai, request_vcr):
    chat = langchain_openai.ChatOpenAI(temperature=0, max_tokens=256)
    with request_vcr.use_cassette("openai_chat_completion_sync_call.yaml"):
        chat.invoke(input=[langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")])


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.skipif(LANGCHAIN_VERSION < (0, 3), reason="Requires at least LangChain 0.3")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_chat_model_sync_generate(langchain_openai, request_vcr):
@@ -136,7 +136,7 @@ def test_openai_chat_model_sync_generate(langchain_openai, request_vcr):
        )


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_openai_chat_model_vision_generate(langchain_openai, request_vcr):
    """
@@ -166,7 +166,7 @@ def test_openai_chat_model_vision_generate(langchain_openai, request_vcr):
        )


-# @flaky(until=1735812000, reason="Batch call has a non-deterministic response order.")
+@flaky(until=1735812000, reason="Batch call has a non-deterministic response order.")
@pytest.mark.asyncio
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
async def test_openai_chat_model_async_generate(langchain_openai, request_vcr):
@@ -232,7 +232,7 @@ def test_pinecone_vectorstore_similarity_search(langchain_openai, request_vcr):
        vectorstore.similarity_search("Who was Alan Turing?", 1)


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_lcel_chain_simple(langchain_core, langchain_openai, request_vcr):
    prompt = langchain_core.prompts.ChatPromptTemplate.from_messages(
@@ -245,7 +245,7 @@ def test_lcel_chain_simple(langchain_core, langchain_openai, request_vcr):
        chain.invoke({"input": "how can langsmith help with testing?"})


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_lcel_chain_complicated(langchain_core, langchain_openai, request_vcr):
    prompt = langchain_core.prompts.ChatPromptTemplate.from_template(
@@ -275,7 +275,7 @@ def test_lcel_chain_complicated(langchain_core, langchain_openai, request_vcr):
        chain.invoke({"topic": "chickens", "style": "a 90s rapper"})


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.asyncio
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
async def test_lcel_chain_simple_async(langchain_core, langchain_openai, request_vcr):
@@ -289,7 +289,7 @@ async def test_lcel_chain_simple_async(langchain_core, langchain_openai, request
        await chain.ainvoke({"input": "how can langsmith help with testing?"})


-# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
+@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Python <3.11 test")
def test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr):
@@ -306,7 +306,7 @@ def test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr):
        chain.batch(inputs=["chickens", "pigs"])


-# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
+@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
@pytest.mark.skipif(sys.version_info < (3, 11), reason="Python 3.11+ required")
def test_lcel_chain_batch_311(langchain_core, langchain_openai, request_vcr):
@@ -323,7 +323,7 @@ def test_lcel_chain_batch_311(langchain_core, langchain_openai, request_vcr):
        chain.batch(inputs=["chickens", "pigs"])


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_lcel_chain_nested(langchain_core, langchain_openai, request_vcr):
    """
@@ -347,7 +347,7 @@ def test_lcel_chain_nested(langchain_core, langchain_openai, request_vcr):
        complete_chain.invoke({"person": "Spongebob Squarepants", "language": "Spanish"})


-# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
+@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
@pytest.mark.asyncio
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
async def test_lcel_chain_batch_async(langchain_core, langchain_openai, request_vcr):
@@ -376,7 +376,7 @@ def test_lcel_chain_non_dict_input(langchain_core):
    sequence.invoke(1)


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_lcel_with_tools_openai(langchain_core, langchain_openai, request_vcr):
    import langchain_core.tools
@@ -397,7 +397,7 @@ def add(a: int, b: int) -> int:
        llm_with_tools.invoke("What is the sum of 1 and 2?")


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_lcel_with_tools_anthropic(langchain_core, langchain_anthropic, request_vcr):
    import langchain_core.tools
@@ -432,7 +432,7 @@ def test_faiss_vectorstore_retrieval(langchain_community, langchain_openai, requ
        retriever.invoke("What was the message of the last test query?")


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_streamed_chain(langchain_core, langchain_openai, streamed_response_responder):
    client = streamed_response_responder(
@@ -454,7 +454,7 @@ def test_streamed_chain(langchain_core, langchain_openai, streamed_response_resp
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_streamed_chat(langchain_openai, streamed_response_responder):
    client = streamed_response_responder(
@@ -470,7 +470,7 @@ def test_streamed_chat(langchain_openai, streamed_response_responder):
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
def test_streamed_llm(langchain_openai, streamed_response_responder):
    client = streamed_response_responder(
@@ -487,7 +487,7 @@ def test_streamed_llm(langchain_openai, streamed_response_responder):
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(
    ignores=IGNORE_FIELDS,
    token="tests.contrib.langchain.test_langchain.test_streamed_chain",
@@ -512,7 +512,7 @@ async def test_astreamed_chain(langchain_core, langchain_openai, async_streamed_
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(
    ignores=IGNORE_FIELDS,
    token="tests.contrib.langchain.test_langchain.test_streamed_chat",
@@ -532,7 +532,7 @@ async def test_astreamed_chat(langchain_openai, async_streamed_response_responde
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(
    ignores=IGNORE_FIELDS,
    token="tests.contrib.langchain.test_langchain.test_streamed_llm",
@@ -552,7 +552,7 @@ async def test_astreamed_llm(langchain_openai, async_streamed_response_responder
        pass


-# @flaky(until=1754218112, reason="Problematic test that needs fixing")
+@flaky(until=1754218112, reason="Problematic test that needs fixing")
@pytest.mark.snapshot(ignores=(IGNORE_FIELDS + ["meta.langchain.request.inputs.0"]))
def test_streamed_json_output_parser(langchain, langchain_core, langchain_openai, streamed_response_responder):
    client = streamed_response_responder(