
Commit bdc3543

feat(node): Add @vercel/ai instrumentation
1 parent 23e3783 commit bdc3543

File tree

8 files changed: +624 -6 lines changed

dev-packages/node-integration-tests/package.json
+1

@@ -38,6 +38,7 @@
     "@types/mongodb": "^3.6.20",
     "@types/mysql": "^2.15.21",
     "@types/pg": "^8.6.5",
+    "ai": "^4.0.6",
     "amqplib": "^0.10.4",
     "apollo-server": "^3.11.1",
     "axios": "^1.7.7",
scenario.js (new file; run by the test below via createRunner(__dirname, 'scenario.js'))

@@ -0,0 +1,59 @@
const { loggingTransport } = require('@sentry-internal/node-integration-tests');
const Sentry = require('@sentry/node');

Sentry.init({
  debug: true,
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  transport: loggingTransport,
});

const { generateText } = require('ai');
const { MockLanguageModelV1 } = require('ai/test');

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    await generateText({
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'First span here!',
        }),
      }),
      prompt: 'Where is the first span?',
    });

    // This span should have input and output prompts attached because telemetry is explicitly enabled.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'Second span here!',
        }),
      }),
      prompt: 'Where is the second span?',
    });

    // This span should not be captured because we've disabled telemetry
    await generateText({
      experimental_telemetry: { isEnabled: false },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'Third span here!',
        }),
      }),
      prompt: 'Where is the third span?',
    });
  });
}

run();
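
A note on the telemetry toggle exercised above: besides isEnabled, the Vercel AI SDK's experimental_telemetry option also accepts recordInputs and recordOutputs flags (per the AI SDK telemetry docs), which control whether prompts and completions land on the span independently of whether the span is created at all. A minimal sketch, assuming a configured model in place of the mock:

const { generateText } = require('ai');

async function askWithoutRecordingIO(model) {
  // A span is still emitted for this call, but neither the prompt nor the
  // generated text is attached to it as an attribute.
  return generateText({
    model,
    prompt: 'Where did my prompt go?',
    experimental_telemetry: {
      isEnabled: true,
      recordInputs: false, // keep input prompts off the span
      recordOutputs: false, // keep generated text off the span
    },
  });
}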
(new test file)

@@ -0,0 +1,129 @@
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

describe('ai', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  test('creates ai related spans', done => {
    const EXPECTED_TRANSACTION = {
      transaction: 'main',
      spans: expect.arrayContaining([
        expect.objectContaining({
          data: expect.objectContaining({
            'ai.completion_tokens.used': 20,
            'ai.model.id': 'mock-model-id',
            'ai.model.provider': 'mock-provider',
            'ai.model_id': 'mock-model-id',
            'ai.operationId': 'ai.generateText',
            'ai.pipeline.name': 'generateText',
            'ai.prompt_tokens.used': 10,
            'ai.response.finishReason': 'stop',
            'ai.settings.maxRetries': 2,
            'ai.settings.maxSteps': 1,
            'ai.streaming': false,
            'ai.tokens.used': 30,
            'ai.usage.completionTokens': 20,
            'ai.usage.promptTokens': 10,
            'operation.name': 'ai.generateText',
            'sentry.op': 'ai.pipeline.generateText',
            'sentry.origin': 'auto.vercelai.otel',
          }),
          description: 'generateText',
          op: 'ai.pipeline.generateText',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        expect.objectContaining({
          data: expect.objectContaining({
            'sentry.origin': 'auto.vercelai.otel',
            'sentry.op': 'ai.run.doGenerate',
            'operation.name': 'ai.generateText.doGenerate',
            'ai.operationId': 'ai.generateText.doGenerate',
            'ai.model.provider': 'mock-provider',
            'ai.model.id': 'mock-model-id',
            'ai.settings.maxRetries': 2,
            'gen_ai.system': 'mock-provider',
            'gen_ai.request.model': 'mock-model-id',
            'ai.pipeline.name': 'generateText.doGenerate',
            'ai.model_id': 'mock-model-id',
            'ai.streaming': false,
            'ai.response.finishReason': 'stop',
            'ai.response.model': 'mock-model-id',
            'ai.usage.promptTokens': 10,
            'ai.usage.completionTokens': 20,
            'gen_ai.response.finish_reasons': ['stop'],
            'gen_ai.usage.input_tokens': 10,
            'gen_ai.usage.output_tokens': 20,
            'ai.completion_tokens.used': 20,
            'ai.prompt_tokens.used': 10,
            'ai.tokens.used': 30,
          }),
          description: 'generateText.doGenerate',
          op: 'ai.run.doGenerate',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        expect.objectContaining({
          data: expect.objectContaining({
            'ai.completion_tokens.used': 20,
            'ai.model.id': 'mock-model-id',
            'ai.model.provider': 'mock-provider',
            'ai.model_id': 'mock-model-id',
            'ai.prompt': '{"prompt":"Where is the second span?"}',
            'ai.operationId': 'ai.generateText',
            'ai.pipeline.name': 'generateText',
            'ai.prompt_tokens.used': 10,
            'ai.response.finishReason': 'stop',
            'ai.input_messages': '{"prompt":"Where is the second span?"}',
            'ai.settings.maxRetries': 2,
            'ai.settings.maxSteps': 1,
            'ai.streaming': false,
            'ai.tokens.used': 30,
            'ai.usage.completionTokens': 20,
            'ai.usage.promptTokens': 10,
            'operation.name': 'ai.generateText',
            'sentry.op': 'ai.pipeline.generateText',
            'sentry.origin': 'auto.vercelai.otel',
          }),
          description: 'generateText',
          op: 'ai.pipeline.generateText',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        expect.objectContaining({
          data: expect.objectContaining({
            'sentry.origin': 'auto.vercelai.otel',
            'sentry.op': 'ai.run.doGenerate',
            'operation.name': 'ai.generateText.doGenerate',
            'ai.operationId': 'ai.generateText.doGenerate',
            'ai.model.provider': 'mock-provider',
            'ai.model.id': 'mock-model-id',
            'ai.settings.maxRetries': 2,
            'gen_ai.system': 'mock-provider',
            'gen_ai.request.model': 'mock-model-id',
            'ai.pipeline.name': 'generateText.doGenerate',
            'ai.model_id': 'mock-model-id',
            'ai.streaming': false,
            'ai.response.finishReason': 'stop',
            'ai.response.model': 'mock-model-id',
            'ai.usage.promptTokens': 10,
            'ai.usage.completionTokens': 20,
            'gen_ai.response.finish_reasons': ['stop'],
            'gen_ai.usage.input_tokens': 10,
            'gen_ai.usage.output_tokens': 20,
            'ai.completion_tokens.used': 20,
            'ai.prompt_tokens.used': 10,
            'ai.tokens.used': 30,
          }),
          description: 'generateText.doGenerate',
          op: 'ai.run.doGenerate',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
      ]),
    };

    createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start(done);
  });
});

packages/node/src/integrations/tracing/index.ts
+3

@@ -19,6 +19,7 @@ import { instrumentNest, nestIntegration } from './nest/nest';
 import { instrumentPostgres, postgresIntegration } from './postgres';
 import { instrumentRedis, redisIntegration } from './redis';
 import { instrumentTedious, tediousIntegration } from './tedious';
+import { instrumentVercelAi, vercelAIIntegration } from './vercelai';

 /**
  * With OTEL, all performance integrations will be added, as OTEL only initializes them when the patched package is actually required.

@@ -48,6 +49,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
     kafkaIntegration(),
     amqplibIntegration(),
     lruMemoizerIntegration(),
+    vercelAIIntegration(),
   ];
 }

@@ -78,5 +80,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
     instrumentTedious,
     instrumentGenericPool,
     instrumentAmqplib,
+    instrumentVercelAi,
   ];
 }
