Skip to content

Commit e8f7430

Browse files
committed
fix fmt
1 parent f9628f9 commit e8f7430

File tree

4 files changed

+29
-34
lines changed

4 files changed

+29
-34
lines changed

ddtrace/contrib/internal/langchain/constants.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,4 +80,3 @@
8080
}
8181

8282
API_KEY = "langchain.request.api_key"
83-
MODEL = "langchain.request.model"

ddtrace/contrib/internal/langchain/patch.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -41,10 +41,6 @@
4141

4242
from ddtrace import config
4343
from ddtrace.contrib.internal.langchain.constants import API_KEY
44-
from ddtrace.contrib.internal.langchain.constants import COMPLETION_TOKENS
45-
from ddtrace.contrib.internal.langchain.constants import MODEL
46-
from ddtrace.contrib.internal.langchain.constants import PROMPT_TOKENS
47-
from ddtrace.contrib.internal.langchain.constants import TOTAL_COST
4844
from ddtrace.contrib.internal.langchain.constants import agent_output_parser_classes
4945
from ddtrace.contrib.internal.langchain.constants import text_embedding_models
5046
from ddtrace.contrib.internal.langchain.constants import vectorstore_classes
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
---
2-
upgrades:
2+
upgrade:
33
- |
44
langchain: Removes the `langchain.tokens.total_cost` span metric for OpenAI calls.
55
For continued cost estimation of OpenAI calls, enable `LLM Observability <https://docs.datadoghq.com/llm_observability/>`_.

tests/contrib/langchain/test_langchain.py

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,8 @@
99

1010
from ddtrace.internal.utils.version import parse_version
1111
from tests.contrib.langchain.utils import get_request_vcr
12-
# from tests.utils import flaky
12+
13+
from tests.utils import flaky
1314

1415

1516
LANGCHAIN_VERSION = parse_version(langchain.__version__)
@@ -23,7 +24,6 @@
2324
"meta.langchain.request.openai.parameters.logprobs",
2425
"meta.langchain.request.openai.parameters.seed", # langchain-openai llm call now includes seed as param
2526
"meta.langchain.request.openai.parameters.logprobs",  # langchain-openai llm call now includes logprobs as param
26-
"metrics.langchain.tokens.total_cost", # total_cost depends on if tiktoken is installed
2727
# these are sometimes named differently
2828
"meta.langchain.request.openai.parameters.max_tokens",
2929
"meta.langchain.request.openai.parameters.max_completion_tokens",
@@ -37,15 +37,15 @@ def request_vcr():
3737
yield get_request_vcr()
3838

3939

40-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
40+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
4141
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
4242
def test_openai_llm_sync(langchain_openai, request_vcr):
4343
llm = langchain_openai.OpenAI()
4444
with request_vcr.use_cassette("openai_completion_sync.yaml"):
4545
llm.invoke("Can you explain what Descartes meant by 'I think, therefore I am'?")
4646

4747

48-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
48+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
4949
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
5050
def test_openai_llm_sync_multiple_prompts(langchain_openai, request_vcr):
5151
llm = langchain_openai.OpenAI()
@@ -58,7 +58,7 @@ def test_openai_llm_sync_multiple_prompts(langchain_openai, request_vcr):
5858
)
5959

6060

61-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
61+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
6262
@pytest.mark.asyncio
6363
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
6464
async def test_openai_llm_async(langchain_openai, request_vcr):
@@ -67,7 +67,7 @@ async def test_openai_llm_async(langchain_openai, request_vcr):
6767
await llm.agenerate(["Which team won the 2019 NBA finals?"])
6868

6969

70-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
70+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
7171
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
7272
def test_openai_llm_error(langchain, langchain_openai, request_vcr):
7373
import openai # Imported here because the os env OPENAI_API_KEY needs to be set via langchain fixture before import
@@ -83,7 +83,7 @@ def test_openai_llm_error(langchain, langchain_openai, request_vcr):
8383
llm.generate([12345, 123456])
8484

8585

86-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
86+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
8787
@pytest.mark.skipif(LANGCHAIN_VERSION < (0, 2), reason="Requires separate cassette for langchain v0.1")
8888
@pytest.mark.snapshot
8989
def test_cohere_llm_sync(langchain_cohere, request_vcr):
@@ -92,7 +92,7 @@ def test_cohere_llm_sync(langchain_cohere, request_vcr):
9292
llm.invoke("What is the secret Krabby Patty recipe?")
9393

9494

95-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
95+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
9696
@pytest.mark.skipif(
9797
LANGCHAIN_VERSION < (0, 2) or sys.version_info < (3, 10),
9898
reason="Requires separate cassette for langchain v0.1, Python 3.9",
@@ -106,15 +106,15 @@ def test_ai21_llm_sync(langchain_community, request_vcr):
106106
llm.invoke("Why does everyone in Bikini Bottom hate Plankton?")
107107

108108

109-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
109+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
110110
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
111111
def test_openai_chat_model_sync_call_langchain_openai(langchain_openai, request_vcr):
112112
chat = langchain_openai.ChatOpenAI(temperature=0, max_tokens=256)
113113
with request_vcr.use_cassette("openai_chat_completion_sync_call.yaml"):
114114
chat.invoke(input=[langchain.schema.HumanMessage(content="When do you use 'whom' instead of 'who'?")])
115115

116116

117-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
117+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
118118
@pytest.mark.skipif(LANGCHAIN_VERSION < (0, 3), reason="Requires at least LangChain 0.3")
119119
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
120120
def test_openai_chat_model_sync_generate(langchain_openai, request_vcr):
@@ -136,7 +136,7 @@ def test_openai_chat_model_sync_generate(langchain_openai, request_vcr):
136136
)
137137

138138

139-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
139+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
140140
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
141141
def test_openai_chat_model_vision_generate(langchain_openai, request_vcr):
142142
"""
@@ -166,7 +166,7 @@ def test_openai_chat_model_vision_generate(langchain_openai, request_vcr):
166166
)
167167

168168

169-
# @flaky(until=1735812000, reason="Batch call has a non-deterministic response order.")
169+
@flaky(until=1735812000, reason="Batch call has a non-deterministic response order.")
170170
@pytest.mark.asyncio
171171
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
172172
async def test_openai_chat_model_async_generate(langchain_openai, request_vcr):
@@ -232,7 +232,7 @@ def test_pinecone_vectorstore_similarity_search(langchain_openai, request_vcr):
232232
vectorstore.similarity_search("Who was Alan Turing?", 1)
233233

234234

235-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
235+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
236236
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
237237
def test_lcel_chain_simple(langchain_core, langchain_openai, request_vcr):
238238
prompt = langchain_core.prompts.ChatPromptTemplate.from_messages(
@@ -245,7 +245,7 @@ def test_lcel_chain_simple(langchain_core, langchain_openai, request_vcr):
245245
chain.invoke({"input": "how can langsmith help with testing?"})
246246

247247

248-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
248+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
249249
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
250250
def test_lcel_chain_complicated(langchain_core, langchain_openai, request_vcr):
251251
prompt = langchain_core.prompts.ChatPromptTemplate.from_template(
@@ -275,7 +275,7 @@ def test_lcel_chain_complicated(langchain_core, langchain_openai, request_vcr):
275275
chain.invoke({"topic": "chickens", "style": "a 90s rapper"})
276276

277277

278-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
278+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
279279
@pytest.mark.asyncio
280280
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
281281
async def test_lcel_chain_simple_async(langchain_core, langchain_openai, request_vcr):
@@ -289,7 +289,7 @@ async def test_lcel_chain_simple_async(langchain_core, langchain_openai, request
289289
await chain.ainvoke({"input": "how can langsmith help with testing?"})
290290

291291

292-
# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
292+
@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
293293
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
294294
@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Python <3.11 test")
295295
def test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr):
@@ -306,7 +306,7 @@ def test_lcel_chain_batch(langchain_core, langchain_openai, request_vcr):
306306
chain.batch(inputs=["chickens", "pigs"])
307307

308308

309-
# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
309+
@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
310310
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
311311
@pytest.mark.skipif(sys.version_info < (3, 11), reason="Python 3.11+ required")
312312
def test_lcel_chain_batch_311(langchain_core, langchain_openai, request_vcr):
@@ -323,7 +323,7 @@ def test_lcel_chain_batch_311(langchain_core, langchain_openai, request_vcr):
323323
chain.batch(inputs=["chickens", "pigs"])
324324

325325

326-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
326+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
327327
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
328328
def test_lcel_chain_nested(langchain_core, langchain_openai, request_vcr):
329329
"""
@@ -347,7 +347,7 @@ def test_lcel_chain_nested(langchain_core, langchain_openai, request_vcr):
347347
complete_chain.invoke({"person": "Spongebob Squarepants", "language": "Spanish"})
348348

349349

350-
# @flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
350+
@flaky(1735812000, reason="batch() is non-deterministic in which order it processes inputs")
351351
@pytest.mark.asyncio
352352
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
353353
async def test_lcel_chain_batch_async(langchain_core, langchain_openai, request_vcr):
@@ -376,7 +376,7 @@ def test_lcel_chain_non_dict_input(langchain_core):
376376
sequence.invoke(1)
377377

378378

379-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
379+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
380380
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
381381
def test_lcel_with_tools_openai(langchain_core, langchain_openai, request_vcr):
382382
import langchain_core.tools
@@ -397,7 +397,7 @@ def add(a: int, b: int) -> int:
397397
llm_with_tools.invoke("What is the sum of 1 and 2?")
398398

399399

400-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
400+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
401401
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
402402
def test_lcel_with_tools_anthropic(langchain_core, langchain_anthropic, request_vcr):
403403
import langchain_core.tools
@@ -432,7 +432,7 @@ def test_faiss_vectorstore_retrieval(langchain_community, langchain_openai, requ
432432
retriever.invoke("What was the message of the last test query?")
433433

434434

435-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
435+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
436436
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
437437
def test_streamed_chain(langchain_core, langchain_openai, streamed_response_responder):
438438
client = streamed_response_responder(
@@ -454,7 +454,7 @@ def test_streamed_chain(langchain_core, langchain_openai, streamed_response_resp
454454
pass
455455

456456

457-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
457+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
458458
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
459459
def test_streamed_chat(langchain_openai, streamed_response_responder):
460460
client = streamed_response_responder(
@@ -470,7 +470,7 @@ def test_streamed_chat(langchain_openai, streamed_response_responder):
470470
pass
471471

472472

473-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
473+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
474474
@pytest.mark.snapshot(ignores=IGNORE_FIELDS)
475475
def test_streamed_llm(langchain_openai, streamed_response_responder):
476476
client = streamed_response_responder(
@@ -487,7 +487,7 @@ def test_streamed_llm(langchain_openai, streamed_response_responder):
487487
pass
488488

489489

490-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
490+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
491491
@pytest.mark.snapshot(
492492
ignores=IGNORE_FIELDS,
493493
token="tests.contrib.langchain.test_langchain.test_streamed_chain",
@@ -512,7 +512,7 @@ async def test_astreamed_chain(langchain_core, langchain_openai, async_streamed_
512512
pass
513513

514514

515-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
515+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
516516
@pytest.mark.snapshot(
517517
ignores=IGNORE_FIELDS,
518518
token="tests.contrib.langchain.test_langchain.test_streamed_chat",
@@ -532,7 +532,7 @@ async def test_astreamed_chat(langchain_openai, async_streamed_response_responde
532532
pass
533533

534534

535-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
535+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
536536
@pytest.mark.snapshot(
537537
ignores=IGNORE_FIELDS,
538538
token="tests.contrib.langchain.test_langchain.test_streamed_llm",
@@ -552,7 +552,7 @@ async def test_astreamed_llm(langchain_openai, async_streamed_response_responder
552552
pass
553553

554554

555-
# @flaky(until=1754218112, reason="Problematic test that needs fixing")
555+
@flaky(until=1754218112, reason="Problematic test that needs fixing")
556556
@pytest.mark.snapshot(ignores=(IGNORE_FIELDS + ["meta.langchain.request.inputs.0"]))
557557
def test_streamed_json_output_parser(langchain, langchain_core, langchain_openai, streamed_response_responder):
558558
client = streamed_response_responder(

0 commit comments

Comments
 (0)