diff --git a/ddtrace/contrib/internal/langchain/constants.py b/ddtrace/contrib/internal/langchain/constants.py index cdc0fc47cc2..40ea9e7a993 100644 --- a/ddtrace/contrib/internal/langchain/constants.py +++ b/ddtrace/contrib/internal/langchain/constants.py @@ -80,7 +80,3 @@ } API_KEY = "langchain.request.api_key" -MODEL = "langchain.request.model" -COMPLETION_TOKENS = "langchain.tokens.completion_tokens" -PROMPT_TOKENS = "langchain.tokens.prompt_tokens" -TOTAL_COST = "langchain.tokens.total_cost" diff --git a/ddtrace/contrib/internal/langchain/patch.py b/ddtrace/contrib/internal/langchain/patch.py index f9681dd1302..58c635dc46f 100644 --- a/ddtrace/contrib/internal/langchain/patch.py +++ b/ddtrace/contrib/internal/langchain/patch.py @@ -41,10 +41,6 @@ from ddtrace import config from ddtrace.contrib.internal.langchain.constants import API_KEY -from ddtrace.contrib.internal.langchain.constants import COMPLETION_TOKENS -from ddtrace.contrib.internal.langchain.constants import MODEL -from ddtrace.contrib.internal.langchain.constants import PROMPT_TOKENS -from ddtrace.contrib.internal.langchain.constants import TOTAL_COST from ddtrace.contrib.internal.langchain.constants import agent_output_parser_classes from ddtrace.contrib.internal.langchain.constants import text_embedding_models from ddtrace.contrib.internal.langchain.constants import vectorstore_classes @@ -114,9 +110,7 @@ def _extract_api_key(instance: Any) -> str: return "" -def _tag_openai_token_usage( - span: Span, llm_output: Dict[str, Any], propagated_cost: int = 0, propagate: bool = False -) -> None: +def _tag_openai_token_usage(span: Span, llm_output: Dict[str, Any]) -> None: """ Extract token usage from llm_output, tag on span. 
Calculate the total cost for each LLM/chat_model, then propagate those values up the trace so that @@ -126,23 +120,6 @@ def _tag_openai_token_usage( current_metric_value = span.get_metric("langchain.tokens.%s_tokens" % token_type) or 0 metric_value = llm_output["token_usage"].get("%s_tokens" % token_type, 0) span.set_metric("langchain.tokens.%s_tokens" % token_type, current_metric_value + metric_value) - total_cost = span.get_metric(TOTAL_COST) or 0 - if not propagate and get_openai_token_cost_for_model: - try: - completion_cost = get_openai_token_cost_for_model( - span.get_tag(MODEL), - span.get_metric(COMPLETION_TOKENS), - is_completion=True, - ) - prompt_cost = get_openai_token_cost_for_model(span.get_tag(MODEL), span.get_metric(PROMPT_TOKENS)) - total_cost = completion_cost + prompt_cost - except ValueError: - # If not in langchain's openai model catalog, the above helpers will raise a ValueError. - log.debug("Cannot calculate token/cost as the model is not in LangChain's OpenAI model catalog.") - if get_openai_token_cost_for_model: - span.set_metric(TOTAL_COST, propagated_cost + total_cost) - if span._parent is not None: - _tag_openai_token_usage(span._parent, llm_output, propagated_cost=propagated_cost + total_cost, propagate=True) def _is_openai_llm_instance(instance): diff --git a/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml b/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml new file mode 100644 index 00000000000..c2c589693ab --- /dev/null +++ b/releasenotes/notes/deprecate-langchain-openai-cost-042f3a04cda9d23b.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + langchain: Removes the `langchain.tokens.total_cost` span metric for OpenAI calls. + For continued cost estimation of OpenAI calls, enable `LLM Observability <https://docs.datadoghq.com/llm_observability/>`_. 
diff --git a/tests/contrib/langchain/test_langchain.py b/tests/contrib/langchain/test_langchain.py index 86a14f524d9..f26edb2bca7 100644 --- a/tests/contrib/langchain/test_langchain.py +++ b/tests/contrib/langchain/test_langchain.py @@ -23,7 +23,6 @@ "meta.langchain.request.openai.parameters.logprobs", "meta.langchain.request.openai.parameters.seed", # langchain-openai llm call now includes seed as param "meta.langchain.request.openai.parameters.logprobs", # langchain-openai llm call now includes seed as param - "metrics.langchain.tokens.total_cost", # total_cost depends on if tiktoken is installed # these are sometimes named differently "meta.langchain.request.openai.parameters.max_tokens", "meta.langchain.request.openai.parameters.max_completion_tokens", diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json index 4788830f558..1d3c08d174c 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_batch.json @@ -61,7 +61,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 16, "langchain.tokens.prompt_tokens": 14, - "langchain.tokens.total_cost": 5.3e-05, "langchain.tokens.total_tokens": 30 }, "duration": 6742000, @@ -95,7 +94,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 15, "langchain.tokens.prompt_tokens": 14, - "langchain.tokens.total_cost": 5.1000000000000006e-05, "langchain.tokens.total_tokens": 29 }, "duration": 3314000, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json index 92121180dfd..ed0c9e0f55d 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json +++ 
b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_complicated.json @@ -25,7 +25,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 19, "langchain.tokens.prompt_tokens": 53, - "langchain.tokens.total_cost": 0.0001175, "langchain.tokens.total_tokens": 72, "process_id": 82010 }, @@ -60,7 +59,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 19, "langchain.tokens.prompt_tokens": 53, - "langchain.tokens.total_cost": 0.0001175, "langchain.tokens.total_tokens": 72 }, "duration": 3680000, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json index 3910e614529..f63c58f92e3 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_nested.json @@ -25,7 +25,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 53, "langchain.tokens.prompt_tokens": 50, - "langchain.tokens.total_cost": 0.000181, "langchain.tokens.total_tokens": 103, "process_id": 82010 }, @@ -51,7 +50,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 16, "langchain.tokens.prompt_tokens": 18, - "langchain.tokens.total_cost": 5.9e-05, "langchain.tokens.total_tokens": 34 }, "duration": 63808000, @@ -85,7 +83,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 16, "langchain.tokens.prompt_tokens": 18, - "langchain.tokens.total_cost": 5.9e-05, "langchain.tokens.total_tokens": 34 }, "duration": 61552000, @@ -119,7 +116,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 37, "langchain.tokens.prompt_tokens": 32, - "langchain.tokens.total_cost": 0.000122, "langchain.tokens.total_tokens": 69 }, "duration": 3288000, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json 
b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json index 9548083a9ee..66254abefd3 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple.json @@ -24,7 +24,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 101, "langchain.tokens.prompt_tokens": 20, - "langchain.tokens.total_cost": 0.000232, "langchain.tokens.total_tokens": 121, "process_id": 82010 }, @@ -61,7 +60,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 101, "langchain.tokens.prompt_tokens": 20, - "langchain.tokens.total_cost": 0.000232, "langchain.tokens.total_tokens": 121 }, "duration": 6142000, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json index a0c1a1e6c53..35d458d43f5 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_chain_simple_async.json @@ -24,7 +24,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 78, "langchain.tokens.prompt_tokens": 20, - "langchain.tokens.total_cost": 0.000186, "langchain.tokens.total_tokens": 98, "process_id": 82010 }, @@ -61,7 +60,6 @@ "_dd.measured": 1, "langchain.tokens.completion_tokens": 78, "langchain.tokens.prompt_tokens": 20, - "langchain.tokens.total_cost": 0.000186, "langchain.tokens.total_tokens": 98 }, "duration": 3399000, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json index 2b865bb7371..99c9f2d5c3e 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json +++ 
b/tests/snapshots/tests.contrib.langchain.test_langchain.test_lcel_with_tools_openai.json @@ -37,7 +37,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 32, "langchain.tokens.prompt_tokens": 85, - "langchain.tokens.total_cost": 9.05e-05, "langchain.tokens.total_tokens": 117, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json index 57fe006a65a..55b112ca6b6 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_async_generate.json @@ -43,7 +43,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 150, "langchain.tokens.prompt_tokens": 60, - "langchain.tokens.total_cost": 0.00038999999999999994, "langchain.tokens.total_tokens": 210, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json index dd9e0c392da..0d4c995aba4 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_call_langchain_openai.json @@ -35,7 +35,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 83, "langchain.tokens.prompt_tokens": 20, - "langchain.tokens.total_cost": 0.00019600000000000002, "langchain.tokens.total_tokens": 103, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json index 
f5de86a6836..6e188b6629c 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_sync_generate.json @@ -43,7 +43,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 110, "langchain.tokens.prompt_tokens": 60, - "langchain.tokens.total_cost": 0.00031, "langchain.tokens.total_tokens": 170, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json index 38bf8bc9a95..d02c31a219a 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_chat_model_vision_generate.json @@ -35,7 +35,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 56, "langchain.tokens.prompt_tokens": 1151, - "langchain.tokens.total_cost": 0, "langchain.tokens.total_tokens": 1207, "process_id": 34354 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json index f1a7884c2bf..5ef01923f7b 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_async.json @@ -36,7 +36,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 12, "langchain.tokens.prompt_tokens": 10, - "langchain.tokens.total_cost": 3.9e-05, "langchain.tokens.total_tokens": 22, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json index a527a18d4a2..6c25e449a96 100644 --- 
a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync.json @@ -36,7 +36,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 256, "langchain.tokens.prompt_tokens": 17, - "langchain.tokens.total_cost": 0.0005375, "langchain.tokens.total_tokens": 273, "process_id": 82010 }, diff --git a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json index fb587526043..9d5f107c31b 100644 --- a/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json +++ b/tests/snapshots/tests.contrib.langchain.test_langchain.test_openai_llm_sync_multiple_prompts.json @@ -40,7 +40,6 @@ "_sampling_priority_v1": 1, "langchain.tokens.completion_tokens": 271, "langchain.tokens.prompt_tokens": 23, - "langchain.tokens.total_cost": 0.0005765000000000001, "langchain.tokens.total_tokens": 294, "process_id": 82010 },