Skip to content

Commit 08b7e49

Browse files
authored
feat(node): Add @vercel/ai instrumentation (#13892)
Adds Sentry tracing instrumentation for the [ai](https://www.npmjs.com/package/ai) library. For more information, see the [`ai` documentation](https://sdk.vercel.ai/docs/ai-sdk-core/telemetry). ```javascript const Sentry = require('@sentry/node'); Sentry.init({ integrations: [Sentry.vercelAIIntegration()], }); ``` By default this integration adds tracing support to all `ai` callsites. If you need to disable collecting spans for a specific call, you can do so by setting `experimental_telemetry.isEnabled` to `false` in the first argument of the function call. ```javascript const result = await generateText({ model: openai('gpt-4-turbo'), experimental_telemetry: { isEnabled: false }, }); ``` If you want to collect inputs and outputs for a specific call, you must specifically opt in to each function call by setting `experimental_telemetry.recordInputs` and `experimental_telemetry.recordOutputs` to `true`. ```javascript const result = await generateText({ model: openai('gpt-4-turbo'), experimental_telemetry: { isEnabled: true, recordInputs: true, recordOutputs: true }, }); ```
1 parent 82fc95d commit 08b7e49

File tree

9 files changed

+623
-6
lines changed

9 files changed

+623
-6
lines changed

dev-packages/node-integration-tests/package.json

+1
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737
"@types/mongodb": "^3.6.20",
3838
"@types/mysql": "^2.15.21",
3939
"@types/pg": "^8.6.5",
40+
"ai": "^4.0.6",
4041
"amqplib": "^0.10.4",
4142
"apollo-server": "^3.11.1",
4243
"axios": "^1.7.7",

dev-packages/node-integration-tests/suites/express/multiple-routers/common-infix/server.ts

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ import { loggingTransport } from '@sentry-internal/node-integration-tests';
22
import * as Sentry from '@sentry/node';
33

44
Sentry.init({
5+
debug: true,
56
dsn: 'https://[email protected]/1337',
67
release: '1.0',
78
tracesSampleRate: 1.0,
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
// Integration-test scenario: exercises the @vercel/ai instrumentation with a
// mocked language model so no network calls are made. The runner inspects the
// transaction this script emits through `loggingTransport`.
const { loggingTransport } = require('@sentry-internal/node-integration-tests');
const Sentry = require('@sentry/node');

Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  transport: loggingTransport,
});

const { generateText } = require('ai');
const { MockLanguageModelV1 } = require('ai/test');

/**
 * Builds a mock model whose single generation resolves to `text`,
 * reporting a fixed 10 prompt / 20 completion token usage.
 */
function createMockModel(text) {
  return new MockLanguageModelV1({
    doGenerate: async () => ({
      rawCall: { rawPrompt: null, rawSettings: {} },
      finishReason: 'stop',
      usage: { promptTokens: 10, completionTokens: 20 },
      text,
    }),
  });
}

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    // Default behavior: span is recorded, but prompts are not attached.
    await generateText({
      model: createMockModel('First span here!'),
      prompt: 'Where is the first span?',
    });

    // This span should have input and output prompts attached because telemetry is explicitly enabled.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: createMockModel('Second span here!'),
      prompt: 'Where is the second span?',
    });

    // This span should not be captured because we've disabled telemetry
    await generateText({
      experimental_telemetry: { isEnabled: false },
      model: createMockModel('Third span here!'),
      prompt: 'Where is the third span?',
    });
  });
}

run();
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
import { conditionalTest } from '../../../utils';
2+
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';
3+
4+
// `ai` SDK only support Node 18+
5+
conditionalTest({ min: 18 })('ai', () => {
6+
afterAll(() => {
7+
cleanupChildProcesses();
8+
});
9+
10+
test('creates ai related spans', done => {
11+
const EXPECTED_TRANSACTION = {
12+
transaction: 'main',
13+
spans: expect.arrayContaining([
14+
expect.objectContaining({
15+
data: expect.objectContaining({
16+
'ai.completion_tokens.used': 20,
17+
'ai.model.id': 'mock-model-id',
18+
'ai.model.provider': 'mock-provider',
19+
'ai.model_id': 'mock-model-id',
20+
'ai.operationId': 'ai.generateText',
21+
'ai.pipeline.name': 'generateText',
22+
'ai.prompt_tokens.used': 10,
23+
'ai.response.finishReason': 'stop',
24+
'ai.settings.maxRetries': 2,
25+
'ai.settings.maxSteps': 1,
26+
'ai.streaming': false,
27+
'ai.total_tokens.used': 30,
28+
'ai.usage.completionTokens': 20,
29+
'ai.usage.promptTokens': 10,
30+
'operation.name': 'ai.generateText',
31+
'sentry.op': 'ai.pipeline.generateText',
32+
'sentry.origin': 'auto.vercelai.otel',
33+
}),
34+
description: 'generateText',
35+
op: 'ai.pipeline.generateText',
36+
origin: 'auto.vercelai.otel',
37+
status: 'ok',
38+
}),
39+
expect.objectContaining({
40+
data: expect.objectContaining({
41+
'sentry.origin': 'auto.vercelai.otel',
42+
'sentry.op': 'ai.run.doGenerate',
43+
'operation.name': 'ai.generateText.doGenerate',
44+
'ai.operationId': 'ai.generateText.doGenerate',
45+
'ai.model.provider': 'mock-provider',
46+
'ai.model.id': 'mock-model-id',
47+
'ai.settings.maxRetries': 2,
48+
'gen_ai.system': 'mock-provider',
49+
'gen_ai.request.model': 'mock-model-id',
50+
'ai.pipeline.name': 'generateText.doGenerate',
51+
'ai.model_id': 'mock-model-id',
52+
'ai.streaming': false,
53+
'ai.response.finishReason': 'stop',
54+
'ai.response.model': 'mock-model-id',
55+
'ai.usage.promptTokens': 10,
56+
'ai.usage.completionTokens': 20,
57+
'gen_ai.response.finish_reasons': ['stop'],
58+
'gen_ai.usage.input_tokens': 10,
59+
'gen_ai.usage.output_tokens': 20,
60+
'ai.completion_tokens.used': 20,
61+
'ai.prompt_tokens.used': 10,
62+
'ai.total_tokens.used': 30,
63+
}),
64+
description: 'generateText.doGenerate',
65+
op: 'ai.run.doGenerate',
66+
origin: 'auto.vercelai.otel',
67+
status: 'ok',
68+
}),
69+
expect.objectContaining({
70+
data: expect.objectContaining({
71+
'ai.completion_tokens.used': 20,
72+
'ai.model.id': 'mock-model-id',
73+
'ai.model.provider': 'mock-provider',
74+
'ai.model_id': 'mock-model-id',
75+
'ai.prompt': '{"prompt":"Where is the second span?"}',
76+
'ai.operationId': 'ai.generateText',
77+
'ai.pipeline.name': 'generateText',
78+
'ai.prompt_tokens.used': 10,
79+
'ai.response.finishReason': 'stop',
80+
'ai.input_messages': '{"prompt":"Where is the second span?"}',
81+
'ai.settings.maxRetries': 2,
82+
'ai.settings.maxSteps': 1,
83+
'ai.streaming': false,
84+
'ai.total_tokens.used': 30,
85+
'ai.usage.completionTokens': 20,
86+
'ai.usage.promptTokens': 10,
87+
'operation.name': 'ai.generateText',
88+
'sentry.op': 'ai.pipeline.generateText',
89+
'sentry.origin': 'auto.vercelai.otel',
90+
}),
91+
description: 'generateText',
92+
op: 'ai.pipeline.generateText',
93+
origin: 'auto.vercelai.otel',
94+
status: 'ok',
95+
}),
96+
expect.objectContaining({
97+
data: expect.objectContaining({
98+
'sentry.origin': 'auto.vercelai.otel',
99+
'sentry.op': 'ai.run.doGenerate',
100+
'operation.name': 'ai.generateText.doGenerate',
101+
'ai.operationId': 'ai.generateText.doGenerate',
102+
'ai.model.provider': 'mock-provider',
103+
'ai.model.id': 'mock-model-id',
104+
'ai.settings.maxRetries': 2,
105+
'gen_ai.system': 'mock-provider',
106+
'gen_ai.request.model': 'mock-model-id',
107+
'ai.pipeline.name': 'generateText.doGenerate',
108+
'ai.model_id': 'mock-model-id',
109+
'ai.streaming': false,
110+
'ai.response.finishReason': 'stop',
111+
'ai.response.model': 'mock-model-id',
112+
'ai.usage.promptTokens': 10,
113+
'ai.usage.completionTokens': 20,
114+
'gen_ai.response.finish_reasons': ['stop'],
115+
'gen_ai.usage.input_tokens': 10,
116+
'gen_ai.usage.output_tokens': 20,
117+
'ai.completion_tokens.used': 20,
118+
'ai.prompt_tokens.used': 10,
119+
'ai.total_tokens.used': 30,
120+
}),
121+
description: 'generateText.doGenerate',
122+
op: 'ai.run.doGenerate',
123+
origin: 'auto.vercelai.otel',
124+
status: 'ok',
125+
}),
126+
]),
127+
};
128+
129+
createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start(done);
130+
});
131+
});

packages/node/src/integrations/tracing/index.ts

+3
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import { instrumentNest, nestIntegration } from './nest/nest';
1919
import { instrumentPostgres, postgresIntegration } from './postgres';
2020
import { instrumentRedis, redisIntegration } from './redis';
2121
import { instrumentTedious, tediousIntegration } from './tedious';
22+
import { instrumentVercelAi, vercelAIIntegration } from './vercelai';
2223

2324
/**
2425
* With OTEL, all performance integrations will be added, as OTEL only initializes them when the patched package is actually required.
@@ -48,6 +49,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
4849
kafkaIntegration(),
4950
amqplibIntegration(),
5051
lruMemoizerIntegration(),
52+
vercelAIIntegration(),
5153
];
5254
}
5355

@@ -78,5 +80,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
7880
instrumentTedious,
7981
instrumentGenericPool,
8082
instrumentAmqplib,
83+
instrumentVercelAi,
8184
];
8285
}

0 commit comments

Comments
 (0)