Skip to content

Commit b8018c5

Browse files
Ensure that the Google GenAI SDK instrumentation correctly populates "finish_reasons" on the span. (#3417)
* Fix bug where 'gen_ai.response.finish_reasons' was not being correctly populated.
* Update changelog.
* Reformat with ruff.
1 parent ce90639 commit b8018c5

File tree

3 files changed

+159
-0
lines changed

3 files changed

+159
-0
lines changed

instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md

+3
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
99

1010
- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344))
1111

12+
- Fix [bug](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3416) where
13+
span attribute `gen_ai.response.finish_reasons` is empty ([#3417](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3417))
14+
1215
## Version 0.1b0 (2025-03-05)
1316

1417
- Add support for async and streaming.

instrumentation-genai/opentelemetry-instrumentation-google-genai/src/opentelemetry/instrumentation/google_genai/generate_content.py

+13
Original file line numberDiff line numberDiff line change
@@ -252,6 +252,7 @@ def process_response(self, response: GenerateContentResponse):
252252
# need to be reflected back into the span attributes.
253253
#
254254
# See also: TODOS.md.
255+
self._update_finish_reasons(response)
255256
self._maybe_update_token_counts(response)
256257
self._maybe_update_error_type(response)
257258
self._maybe_log_response(response)
@@ -275,6 +276,18 @@ def finalize_processing(self):
275276
self._record_token_usage_metric()
276277
self._record_duration_metric()
277278

279+
def _update_finish_reasons(self, response):
280+
if not response.candidates:
281+
return
282+
for candidate in response.candidates:
283+
finish_reason = candidate.finish_reason
284+
if finish_reason is None:
285+
continue
286+
finish_reason_str = finish_reason.name.lower().removeprefix(
287+
"finish_reason_"
288+
)
289+
self._finish_reasons_set.add(finish_reason_str)
290+
278291
def _maybe_update_token_counts(self, response: GenerateContentResponse):
279292
input_tokens = _get_response_property(
280293
response, "usage_metadata.prompt_token_count"
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,143 @@
1+
# Copyright The OpenTelemetry Authors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
16+
from google.genai import types as genai_types
17+
18+
from .base import TestCase
19+
20+
21+
class FinishReasonsTestCase(TestCase):
    """Checks the ``gen_ai.response.finish_reasons`` span attribute.

    Each test configures a canned model response, triggers a
    ``generate_content`` call, and inspects the finish reasons that the
    instrumentation recorded on the resulting span.
    """

    def generate_and_get_span_finish_reasons(self):
        """Call generate_content and return the recorded finish reasons.

        Asserts that the expected span exists and carries the
        ``gen_ai.response.finish_reasons`` attribute before returning it
        as a plain list.
        """
        self.client.models.generate_content(
            model="gemini-2.5-flash-001", contents="Some prompt"
        )
        span = self.otel.get_span_named(
            "generate_content gemini-2.5-flash-001"
        )
        assert span is not None
        assert "gen_ai.response.finish_reasons" in span.attributes
        return list(span.attributes["gen_ai.response.finish_reasons"])

    def test_single_candidate_with_valid_reason(self):
        candidate = genai_types.Candidate(
            finish_reason=genai_types.FinishReason.STOP
        )
        self.configure_valid_response(candidate=candidate)
        self.assertEqual(self.generate_and_get_span_finish_reasons(), ["stop"])

    def test_single_candidate_with_safety_reason(self):
        candidate = genai_types.Candidate(
            finish_reason=genai_types.FinishReason.SAFETY
        )
        self.configure_valid_response(candidate=candidate)
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["safety"]
        )

    def test_single_candidate_with_max_tokens_reason(self):
        candidate = genai_types.Candidate(
            finish_reason=genai_types.FinishReason.MAX_TOKENS
        )
        self.configure_valid_response(candidate=candidate)
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["max_tokens"]
        )

    def test_single_candidate_with_no_reason(self):
        # No finish reason at all -> nothing should be recorded.
        candidate = genai_types.Candidate(finish_reason=None)
        self.configure_valid_response(candidate=candidate)
        self.assertEqual(self.generate_and_get_span_finish_reasons(), [])

    def test_single_candidate_with_unspecified_reason(self):
        # FINISH_REASON_UNSPECIFIED is normalized to the bare "unspecified".
        candidate = genai_types.Candidate(
            finish_reason=genai_types.FinishReason.FINISH_REASON_UNSPECIFIED
        )
        self.configure_valid_response(candidate=candidate)
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["unspecified"]
        )

    def test_multiple_candidates_with_valid_reasons(self):
        reasons = [
            genai_types.FinishReason.MAX_TOKENS,
            genai_types.FinishReason.STOP,
        ]
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(finish_reason=reason)
                for reason in reasons
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(), ["max_tokens", "stop"]
        )

    def test_sorts_finish_reasons(self):
        # Supplied out of order; the attribute is expected sorted.
        reasons = [
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.MAX_TOKENS,
            genai_types.FinishReason.SAFETY,
        ]
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(finish_reason=reason)
                for reason in reasons
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(),
            ["max_tokens", "safety", "stop"],
        )

    def test_deduplicates_finish_reasons(self):
        # Many STOP candidates; each distinct reason appears exactly once.
        reasons = [
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.MAX_TOKENS,
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.SAFETY,
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.STOP,
            genai_types.FinishReason.STOP,
        ]
        self.configure_valid_response(
            candidates=[
                genai_types.Candidate(finish_reason=reason)
                for reason in reasons
            ]
        )
        self.assertEqual(
            self.generate_and_get_span_finish_reasons(),
            ["max_tokens", "safety", "stop"],
        )

0 commit comments

Comments
 (0)