4 files changed: +36 -14 lines changed

instrumentation-vertexai/src

@@ -157,10 +157,22 @@ export class VertexAIInstrumentation extends InstrumentationBase {
       }

       if (this._shouldSendPrompts() && "contents" in params) {
-        attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`] =
-          params.contents[0].role ?? "user";
-        attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`] =
-          this._formatPartsData(params.contents[0].parts);
+        let i = 0;
+
+        if (instance["systemInstruction"]) {
+          attributes[`${SpanAttributes.LLM_PROMPTS}.${i}.role`] = "system";
+          attributes[`${SpanAttributes.LLM_PROMPTS}.${i}.content`] =
+            this._formatPartsData(instance["systemInstruction"].parts);
+
+          i++;
+        }
+
+        params.contents.forEach((content, j) => {
+          attributes[`${SpanAttributes.LLM_PROMPTS}.${i + j}.role`] =
+            content.role ?? "user";
+          attributes[`${SpanAttributes.LLM_PROMPTS}.${i + j}.content`] =
+            this._formatPartsData(content.parts);
+        });
       }
     } catch (e) {
       this._diag.debug(e);
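
A standalone sketch of the indexing scheme this hunk introduces (not code from the PR): the promptAttributes helper, the Part/Content types, and the "gen_ai.prompt" prefix are illustrative stand-ins for the instrumentation's internals and SpanAttributes.LLM_PROMPTS. The system instruction, when set on the model instance, is recorded first, and the conversation turns follow at shifted indices:

// Standalone sketch (assumed names and types, not the actual instrumentation code).
type Part = { text?: string };
type Content = { role?: string; parts: Part[] };

function promptAttributes(
  systemInstruction: Content | undefined,
  contents: Content[],
  formatParts: (parts: Part[]) => string,
): Record<string, string> {
  const PROMPTS = "gen_ai.prompt"; // assumed value of SpanAttributes.LLM_PROMPTS
  const attributes: Record<string, string> = {};
  let i = 0;

  // A system instruction, when present, becomes prompt index 0 with role "system".
  if (systemInstruction) {
    attributes[`${PROMPTS}.${i}.role`] = "system";
    attributes[`${PROMPTS}.${i}.content`] = formatParts(systemInstruction.parts);
    i++;
  }

  // Every conversation turn follows, offset by one if a system prompt was recorded.
  contents.forEach((content, j) => {
    attributes[`${PROMPTS}.${i + j}.role`] = content.role ?? "user";
    attributes[`${PROMPTS}.${i + j}.content`] = formatParts(content.parts);
  });

  return attributes;
}

// Example: yields gen_ai.prompt.0.* for the system prompt and gen_ai.prompt.1.* for the user turn.
const attrs = promptAttributes(
  { role: "system", parts: [{ text: "You are a helpful assistant" }] },
  [{ role: "user", parts: [{ text: "How are you doing today?" }] }],
  (parts) => parts.map((p) => p.text ?? "").join(""),
);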
@@ -36,8 +36,8 @@
     "@anthropic-ai/sdk": "^0.27.1",
     "@aws-sdk/client-bedrock-runtime": "^3.499.0",
     "@azure/openai": "^1.0.0-beta.11",
-    "@google-cloud/aiplatform": "^3.10.0",
-    "@google-cloud/vertexai": "^1.2.0",
+    "@google-cloud/aiplatform": "^3.32.0",
+    "@google-cloud/vertexai": "^1.9.0",
     "@langchain/community": "^0.2.31",
     "@pinecone-database/pinecone": "^2.0.1",
     "@traceloop/node-server-sdk": "*",
@@ -20,6 +20,10 @@ async function createNonStreamingContent() {
   // Instantiate the model
   const generativeModel = vertexAI.getGenerativeModel({
     model: "gemini-1.5-flash",
+    systemInstruction: {
+      role: "system",
+      parts: [{ text: "You are a helpful assistant" }],
+    },
   });

   const request = {
@@ -53,6 +57,10 @@ async function createStreamingContent() {
   // Instantiate the model
   const generativeModel = vertexAI.getGenerativeModel({
     model: "gemini-1.5-flash",
+    systemInstruction: {
+      role: "system",
+      parts: [{ text: "You are a helpful assistant" }],
+    },
   });

   const request = {
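
For reference, a minimal sketch of how a model configured like the updated sample might be invoked end to end with @google-cloud/vertexai; the project, location, and user prompt below are illustrative, not taken from the sample:

import { VertexAI } from "@google-cloud/vertexai";

async function main() {
  // Illustrative project/location; the sample reads its own configuration.
  const vertexAI = new VertexAI({ project: "my-project", location: "us-central1" });

  const generativeModel = vertexAI.getGenerativeModel({
    model: "gemini-1.5-flash",
    systemInstruction: {
      role: "system",
      parts: [{ text: "You are a helpful assistant" }],
    },
  });

  // With the instrumentation change above, the systemInstruction is captured as
  // prompt 0 and this user turn as prompt 1 on the resulting span.
  const result = await generativeModel.generateContent({
    contents: [{ role: "user", parts: [{ text: "What are the four cardinal directions?" }] }],
  });

  console.log(result.response.candidates?.[0]?.content?.parts?.[0]?.text);
}

main();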