@@ -1,3 +1,4 @@
+import { join } from 'node:path';
import { afterAll, describe, expect, test } from 'vitest';
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

@@ -7,7 +8,7 @@ describe('ai', () => {
    cleanupChildProcesses();
  });

-  test('creates ai related spans', async () => {
+  test('creates ai related spans - cjs', async () => {
    const EXPECTED_TRANSACTION = {
      transaction: 'main',
      spans: expect.arrayContaining([
@@ -128,4 +129,130 @@ describe('ai', () => {

    await createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
  });
+
+  test('creates ai related spans - esm', async () => {
+    const EXPECTED_TRANSACTION = {
+      transaction: 'main',
+      spans: expect.arrayContaining([
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'ai.completion_tokens.used': 20,
+            'ai.model.id': 'mock-model-id',
+            'ai.model.provider': 'mock-provider',
+            'ai.model_id': 'mock-model-id',
+            'ai.operationId': 'ai.generateText',
+            'ai.pipeline.name': 'generateText',
+            'ai.prompt_tokens.used': 10,
+            'ai.response.finishReason': 'stop',
+            'ai.settings.maxRetries': 2,
+            'ai.settings.maxSteps': 1,
+            'ai.streaming': false,
+            'ai.total_tokens.used': 30,
+            'ai.usage.completionTokens': 20,
+            'ai.usage.promptTokens': 10,
+            'operation.name': 'ai.generateText',
+            'sentry.op': 'ai.pipeline.generateText',
+            'sentry.origin': 'auto.vercelai.otel',
+          }),
+          description: 'generateText',
+          op: 'ai.pipeline.generateText',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'sentry.origin': 'auto.vercelai.otel',
+            'sentry.op': 'ai.run.doGenerate',
+            'operation.name': 'ai.generateText.doGenerate',
+            'ai.operationId': 'ai.generateText.doGenerate',
+            'ai.model.provider': 'mock-provider',
+            'ai.model.id': 'mock-model-id',
+            'ai.settings.maxRetries': 2,
+            'gen_ai.system': 'mock-provider',
+            'gen_ai.request.model': 'mock-model-id',
+            'ai.pipeline.name': 'generateText.doGenerate',
+            'ai.model_id': 'mock-model-id',
+            'ai.streaming': false,
+            'ai.response.finishReason': 'stop',
+            'ai.response.model': 'mock-model-id',
+            'ai.usage.promptTokens': 10,
+            'ai.usage.completionTokens': 20,
+            'gen_ai.response.finish_reasons': ['stop'],
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'ai.completion_tokens.used': 20,
+            'ai.prompt_tokens.used': 10,
+            'ai.total_tokens.used': 30,
+          }),
+          description: 'generateText.doGenerate',
+          op: 'ai.run.doGenerate',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'ai.completion_tokens.used': 20,
+            'ai.model.id': 'mock-model-id',
+            'ai.model.provider': 'mock-provider',
+            'ai.model_id': 'mock-model-id',
+            'ai.prompt': '{"prompt":"Where is the second span?"}',
+            'ai.operationId': 'ai.generateText',
+            'ai.pipeline.name': 'generateText',
+            'ai.prompt_tokens.used': 10,
+            'ai.response.finishReason': 'stop',
+            'ai.input_messages': '{"prompt":"Where is the second span?"}',
+            'ai.settings.maxRetries': 2,
+            'ai.settings.maxSteps': 1,
+            'ai.streaming': false,
+            'ai.total_tokens.used': 30,
+            'ai.usage.completionTokens': 20,
+            'ai.usage.promptTokens': 10,
+            'operation.name': 'ai.generateText',
+            'sentry.op': 'ai.pipeline.generateText',
+            'sentry.origin': 'auto.vercelai.otel',
+          }),
+          description: 'generateText',
+          op: 'ai.pipeline.generateText',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'sentry.origin': 'auto.vercelai.otel',
+            'sentry.op': 'ai.run.doGenerate',
+            'operation.name': 'ai.generateText.doGenerate',
+            'ai.operationId': 'ai.generateText.doGenerate',
+            'ai.model.provider': 'mock-provider',
+            'ai.model.id': 'mock-model-id',
+            'ai.settings.maxRetries': 2,
+            'gen_ai.system': 'mock-provider',
+            'gen_ai.request.model': 'mock-model-id',
+            'ai.pipeline.name': 'generateText.doGenerate',
+            'ai.model_id': 'mock-model-id',
+            'ai.streaming': false,
+            'ai.response.finishReason': 'stop',
+            'ai.response.model': 'mock-model-id',
+            'ai.usage.promptTokens': 10,
+            'ai.usage.completionTokens': 20,
+            'gen_ai.response.finish_reasons': ['stop'],
+            'gen_ai.usage.input_tokens': 10,
+            'gen_ai.usage.output_tokens': 20,
+            'ai.completion_tokens.used': 20,
+            'ai.prompt_tokens.used': 10,
+            'ai.total_tokens.used': 30,
+          }),
+          description: 'generateText.doGenerate',
+          op: 'ai.run.doGenerate',
+          origin: 'auto.vercelai.otel',
+          status: 'ok',
+        }),
+      ]),
+    };
+
+    await createRunner(__dirname, 'scenario.mjs')
+      .withFlags('--import', join(__dirname, 'instrument.mjs'))
+      .expect({ transaction: EXPECTED_TRANSACTION })
+      .start()
+      .completed();
+  });
});
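
Note: the new ESM test relies on two sibling files that are not part of this diff: scenario.mjs (the script the runner executes) and instrument.mjs, which the runner preloads through Node's --import flag so that Sentry initializes, and hooks ESM module resolution, before the scenario's own imports are evaluated. As a rough, hypothetical sketch (the DSN and sampling rate below are illustrative placeholders, not taken from this PR), such an instrument.mjs typically just boots the SDK:

// instrument.mjs — hypothetical sketch; the real file is not shown in this diff.
// Preloaded via `node --import ./instrument.mjs scenario.mjs` so the SDK is
// initialized before any of the scenario's imports run.
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337', // placeholder test DSN
  tracesSampleRate: 1, // record every transaction so the runner's span assertions are deterministic
});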