
Commit f9b14f4

fix: Vercel ai import-in-the-middle patching

1 parent f0c9458

4 files changed: +206 -8 lines
dev-packages/node-integration-tests/suites/tracing/ai/instrument.mjs (new file)

@@ -0,0 +1,9 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  transport: loggingTransport,
+});
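
For ESM, this init code has to run before `ai` is first imported so that the import-in-the-middle hooks are registered in time. The new test further down wires this up through the runner's `--import` flag; outside the test harness, the equivalent invocation would look roughly like this (paths are illustrative, not part of the commit):

# Preload the Sentry instrumentation, then run the app
node --import ./instrument.mjs ./scenario.mjs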
dev-packages/node-integration-tests/suites/tracing/ai/scenario.mjs (new file)

@@ -0,0 +1,49 @@
+import * as Sentry from '@sentry/node';
+import { generateText } from 'ai';
+import { MockLanguageModelV1 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await generateText({
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'First span here!',
+        }),
+      }),
+      prompt: 'Where is the first span?',
+    });
+
+    // This span should have input and output prompts attached because telemetry is explicitly enabled.
+    await generateText({
+      experimental_telemetry: { isEnabled: true },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Second span here!',
+        }),
+      }),
+      prompt: 'Where is the second span?',
+    });
+
+    // This span should not be captured because we've disabled telemetry
+    await generateText({
+      experimental_telemetry: { isEnabled: false },
+      model: new MockLanguageModelV1({
+        doGenerate: async () => ({
+          rawCall: { rawPrompt: null, rawSettings: {} },
+          finishReason: 'stop',
+          usage: { promptTokens: 10, completionTokens: 20 },
+          text: 'Third span here!',
+        }),
+      }),
+      prompt: 'Where is the third span?',
+    });
+  });
+}
+
+run();
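
These three calls pin down the telemetry defaults applied by the patched methods in instrumentation.ts below: a call with no `experimental_telemetry` still produces spans, an explicit `isEnabled: true` additionally records prompts, and an explicit `isEnabled: false` produces no spans. A hedged sketch of the rewrite each call goes through follows; the instrumentation hunk below elides the body that builds the telemetry settings, so the default handling here is an assumption inferred from the test expectations, and `originalGenerateText` is a hypothetical stand-in for the unpatched export:

// Sketch only, assuming defaults inferred from the expectations in test.ts.
function patchedGenerateText(options) {
  const existing = options.experimental_telemetry || {};
  if (existing.isEnabled === undefined) {
    // No caller preference: enable telemetry so spans are created
    // (presumably without recording raw inputs/outputs).
    options.experimental_telemetry = { ...existing, isEnabled: true };
  }
  // An explicit isEnabled value (true or false) is left untouched.
  return originalGenerateText(options); // hypothetical unpatched method
}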

dev-packages/node-integration-tests/suites/tracing/ai/test.ts
+128 -1

@@ -1,3 +1,4 @@
+import { join } from 'node:path';
 import { afterAll, describe, expect, test } from 'vitest';
 import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

@@ -7,7 +8,7 @@ describe('ai', () => {
     cleanupChildProcesses();
   });

-  test('creates ai related spans', async () => {
+  test('creates ai related spans - cjs', async () => {
     const EXPECTED_TRANSACTION = {
       transaction: 'main',
       spans: expect.arrayContaining([

@@ -128,4 +129,130 @@ describe('ai', () => {

     await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
   });
+
+  test('creates ai related spans - esm', async () => {
+    const EXPECTED_TRANSACTION = {
+      transaction: 'main',
+      spans: expect.arrayContaining([
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'ai.completion_tokens.used': 20,
+            'ai.model.id': 'mock-model-id',
+            'ai.model.provider': 'mock-provider',
+            'ai.model_id': 'mock-model-id',
+            'ai.operationId': 'ai.generateText',
+            'ai.pipeline.name': 'generateText',
+            'ai.prompt_tokens.used': 10,
+            'ai.response.finishReason': 'stop',
+            'ai.settings.maxRetries': 2,
+            'ai.settings.maxSteps': 1,
+            'ai.streaming': false,
+            'ai.total_tokens.used': 30,
+            'ai.usage.completionTokens': 20,
+            'ai.usage.promptTokens': 10,
+            'operation.name': 'ai.generateText',
+            'sentry.op': 'ai.pipeline.generateText',
+            'sentry.origin': 'auto.vercelai.otel',
+          }),
+          description: 'generateText',
+          op: 'ai.pipeline.generateText',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'sentry.origin': 'auto.vercelai.otel',
+            'sentry.op': 'ai.run.doGenerate',
+            'operation.name': 'ai.generateText.doGenerate',
+            'ai.operationId': 'ai.generateText.doGenerate',
+            'ai.model.provider': 'mock-provider',
+            'ai.model.id': 'mock-model-id',
+            'ai.settings.maxRetries': 2,
+            'gen_ai.system': 'mock-provider',
+            'gen_ai.request.model': 'mock-model-id',
+            'ai.pipeline.name': 'generateText.doGenerate',
+            'ai.model_id': 'mock-model-id',
+            'ai.streaming': false,
+            'ai.response.finishReason': 'stop',
+            'ai.response.model': 'mock-model-id',
+            'ai.usage.promptTokens': 10,
+            'ai.usage.completionTokens': 20,
+            'gen_ai.response.finish_reasons': ['stop'],
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'ai.completion_tokens.used': 20,
+            'ai.prompt_tokens.used': 10,
+            'ai.total_tokens.used': 30,
+          }),
+          description: 'generateText.doGenerate',
+          op: 'ai.run.doGenerate',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'ai.completion_tokens.used': 20,
+            'ai.model.id': 'mock-model-id',
+            'ai.model.provider': 'mock-provider',
+            'ai.model_id': 'mock-model-id',
+            'ai.prompt': '{"prompt":"Where is the second span?"}',
+            'ai.operationId': 'ai.generateText',
+            'ai.pipeline.name': 'generateText',
+            'ai.prompt_tokens.used': 10,
+            'ai.response.finishReason': 'stop',
+            'ai.input_messages': '{"prompt":"Where is the second span?"}',
+            'ai.settings.maxRetries': 2,
+            'ai.settings.maxSteps': 1,
+            'ai.streaming': false,
+            'ai.total_tokens.used': 30,
+            'ai.usage.completionTokens': 20,
+            'ai.usage.promptTokens': 10,
+            'operation.name': 'ai.generateText',
+            'sentry.op': 'ai.pipeline.generateText',
+            'sentry.origin': 'auto.vercelai.otel',
+          }),
+          description: 'generateText',
+          op: 'ai.pipeline.generateText',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'sentry.origin': 'auto.vercelai.otel',
+            'sentry.op': 'ai.run.doGenerate',
+            'operation.name': 'ai.generateText.doGenerate',
+            'ai.operationId': 'ai.generateText.doGenerate',
+            'ai.model.provider': 'mock-provider',
+            'ai.model.id': 'mock-model-id',
+            'ai.settings.maxRetries': 2,
+            'gen_ai.system': 'mock-provider',
+            'gen_ai.request.model': 'mock-model-id',
+            'ai.pipeline.name': 'generateText.doGenerate',
+            'ai.model_id': 'mock-model-id',
+            'ai.streaming': false,
+            'ai.response.finishReason': 'stop',
+            'ai.response.model': 'mock-model-id',
+            'ai.usage.promptTokens': 10,
+            'ai.usage.completionTokens': 20,
+            'gen_ai.response.finish_reasons': ['stop'],
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'ai.completion_tokens.used': 20,
+            'ai.prompt_tokens.used': 10,
+            'ai.total_tokens.used': 30,
+          }),
+          description: 'generateText.doGenerate',
+          op: 'ai.run.doGenerate',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+      ]),
+    };
+
+    await createRunner(__dirname, 'scenario.mjs')
+      .withFlags('--import', join(__dirname, 'instrument.mjs'))
+      .expect({ transaction: EXPECTED_TRANSACTION })
+      .start()
+      .completed();
+  });
 });

packages/node/src/integrations/tracing/vercelai/instrumentation.ts
+20 -7

@@ -66,7 +66,7 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase {
     this._callbacks.forEach(callback => callback());
     this._callbacks = [];

-    function generatePatch(name: string) {
+    function generatePatch(originalMethod: (...args: MethodArgs) => unknown) {
       return (...args: MethodArgs) => {
         const existingExperimentalTelemetry = args[0].experimental_telemetry || {};
         const isEnabled = existingExperimentalTelemetry.isEnabled;

@@ -83,15 +83,28 @@ export class SentryVercelAiInstrumentation extends InstrumentationBase {
         }

         // @ts-expect-error we know that the method exists
-        return moduleExports[name].apply(this, args);
+        return originalMethod.apply(this, args);
       };
     }

-    const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
-      acc[curr] = generatePatch(curr);
-      return acc;
-    }, {} as PatchedModuleExports);
+    // Is this an ESM module?
+    // https://tc39.es/ecma262/#sec-module-namespace-objects
+    if (Object.prototype.toString.call(moduleExports) === '[object Module]') {
+      // In ESM we take the usual route and just replace the exports we want to instrument
+      for (const method of INSTRUMENTED_METHODS) {
+        moduleExports[method] = generatePatch(moduleExports[method]);
+      }

-    return { ...moduleExports, ...patchedModuleExports };
+      return moduleExports;
+    } else {
+      // In CJS we can't replace the exports in the original module because they
+      // don't have setters, so we create a new object with the same properties
+      const patchedModuleExports = INSTRUMENTED_METHODS.reduce((acc, curr) => {
+        acc[curr] = generatePatch(moduleExports[curr]);
+        return acc;
+      }, {} as PatchedModuleExports);
+
+      return { ...moduleExports, ...patchedModuleExports };
+    }
   }
 }
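
The fix turns on telling the two module shapes apart. The spec linked in the comment above fixes the `Symbol.toStringTag` of a module namespace object to 'Module', so `Object.prototype.toString` brands it '[object Module]', while a plain CJS exports object is branded '[object Object]'. A minimal standalone sketch of that check (the imported module and variable names are illustrative, not from the commit):

// check.mjs: reproduces the '[object Module]' brand test from the diff above
import * as ns from 'node:path';

// A plain object, standing in for a CJS `module.exports`
const cjsStyleExports = { generateText: () => {} };

console.log(Object.prototype.toString.call(ns)); // '[object Module]'
console.log(Object.prototype.toString.call(cjsStyleExports)); // '[object Object]'

A genuine namespace object is read-only, so the in-place assignment in the ESM branch presumably works only because import-in-the-middle substitutes a mutable wrapper whose exports have setters; the CJS branch keeps the copy-and-spread approach because its exports object genuinely has none.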
