Skip to content

Commit 772d3b0

Browse files
committed
further refactors
1 parent: 8c3e1da · commit: 772d3b0

File tree

2 files changed

+33
-37
lines changed
  • dev-packages/node-integration-tests/suites/tracing/ai
  • packages/node/src/integrations/tracing/vercelai

2 files changed

+33
-37
lines changed

dev-packages/node-integration-tests/suites/tracing/ai/test.ts

Lines changed: 24 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -12,20 +12,18 @@ describe('ai', () => {
1212
spans: expect.arrayContaining([
1313
expect.objectContaining({
1414
data: expect.objectContaining({
15-
'ai.completion_tokens.used': 20,
1615
'ai.model.id': 'mock-model-id',
1716
'ai.model.provider': 'mock-provider',
18-
'ai.model_id': 'mock-model-id',
1917
'ai.operationId': 'ai.generateText',
2018
'ai.pipeline.name': 'generateText',
21-
'ai.prompt_tokens.used': 10,
2219
'ai.response.finishReason': 'stop',
2320
'ai.settings.maxRetries': 2,
2421
'ai.settings.maxSteps': 1,
2522
'ai.streaming': false,
26-
'ai.total_tokens.used': 30,
27-
'ai.usage.completionTokens': 20,
28-
'ai.usage.promptTokens': 10,
23+
'gen_ai.response.model': 'mock-model-id',
24+
'gen_ai.usage.input_tokens': 10,
25+
'gen_ai.usage.output_tokens': 20,
26+
'gen_ai.usage.total_tokens': 30,
2927
'operation.name': 'ai.generateText',
3028
'sentry.op': 'ai.pipeline.generateText',
3129
'sentry.origin': 'auto.vercelai.otel',
@@ -47,18 +45,17 @@ describe('ai', () => {
4745
'gen_ai.system': 'mock-provider',
4846
'gen_ai.request.model': 'mock-model-id',
4947
'ai.pipeline.name': 'generateText.doGenerate',
50-
'ai.model_id': 'mock-model-id',
5148
'ai.streaming': false,
5249
'ai.response.finishReason': 'stop',
5350
'ai.response.model': 'mock-model-id',
54-
'ai.usage.promptTokens': 10,
55-
'ai.usage.completionTokens': 20,
51+
'ai.response.id': expect.any(String),
52+
'ai.response.timestamp': expect.any(String),
5653
'gen_ai.response.finish_reasons': ['stop'],
5754
'gen_ai.usage.input_tokens': 10,
5855
'gen_ai.usage.output_tokens': 20,
59-
'ai.completion_tokens.used': 20,
60-
'ai.prompt_tokens.used': 10,
61-
'ai.total_tokens.used': 30,
56+
'gen_ai.response.id': expect.any(String),
57+
'gen_ai.response.model': 'mock-model-id',
58+
'gen_ai.usage.total_tokens': 30,
6259
}),
6360
description: 'generateText.doGenerate',
6461
op: 'ai.run.doGenerate',
@@ -67,22 +64,21 @@ describe('ai', () => {
6764
}),
6865
expect.objectContaining({
6966
data: expect.objectContaining({
70-
'ai.completion_tokens.used': 20,
7167
'ai.model.id': 'mock-model-id',
7268
'ai.model.provider': 'mock-provider',
73-
'ai.model_id': 'mock-model-id',
74-
'ai.prompt': '{"prompt":"Where is the second span?"}',
7569
'ai.operationId': 'ai.generateText',
7670
'ai.pipeline.name': 'generateText',
77-
'ai.prompt_tokens.used': 10,
71+
'ai.prompt': '{"prompt":"Where is the second span?"}',
7872
'ai.response.finishReason': 'stop',
79-
'ai.input_messages': '{"prompt":"Where is the second span?"}',
73+
'ai.response.text': expect.any(String),
8074
'ai.settings.maxRetries': 2,
8175
'ai.settings.maxSteps': 1,
8276
'ai.streaming': false,
83-
'ai.total_tokens.used': 30,
84-
'ai.usage.completionTokens': 20,
85-
'ai.usage.promptTokens': 10,
77+
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
78+
'gen_ai.response.model': 'mock-model-id',
79+
'gen_ai.usage.input_tokens': 10,
80+
'gen_ai.usage.output_tokens': 20,
81+
'gen_ai.usage.total_tokens': 30,
8682
'operation.name': 'ai.generateText',
8783
'sentry.op': 'ai.pipeline.generateText',
8884
'sentry.origin': 'auto.vercelai.otel',
@@ -104,18 +100,20 @@ describe('ai', () => {
104100
'gen_ai.system': 'mock-provider',
105101
'gen_ai.request.model': 'mock-model-id',
106102
'ai.pipeline.name': 'generateText.doGenerate',
107-
'ai.model_id': 'mock-model-id',
108103
'ai.streaming': false,
109104
'ai.response.finishReason': 'stop',
110105
'ai.response.model': 'mock-model-id',
111-
'ai.usage.promptTokens': 10,
112-
'ai.usage.completionTokens': 20,
106+
'ai.response.id': expect.any(String),
107+
'ai.response.text': expect.any(String),
108+
'ai.response.timestamp': expect.any(String),
109+
'ai.prompt.format': expect.any(String),
110+
'ai.prompt.messages': expect.any(String),
113111
'gen_ai.response.finish_reasons': ['stop'],
114112
'gen_ai.usage.input_tokens': 10,
115113
'gen_ai.usage.output_tokens': 20,
116-
'ai.completion_tokens.used': 20,
117-
'ai.prompt_tokens.used': 10,
118-
'ai.total_tokens.used': 30,
114+
'gen_ai.response.id': expect.any(String),
115+
'gen_ai.response.model': 'mock-model-id',
116+
'gen_ai.usage.total_tokens': 30,
119117
}),
120118
description: 'generateText.doGenerate',
121119
op: 'ai.run.doGenerate',

packages/node/src/integrations/tracing/vercelai/index.ts

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -142,24 +142,22 @@ const _vercelAIIntegration = (() => {
142142
continue;
143143
}
144144

145-
if (
146-
attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined &&
147-
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] != undefined
148-
) {
145+
if (attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined) {
149146
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
147+
// eslint-disable-next-line @typescript-eslint/no-dynamic-delete
148+
delete attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
150149
}
151-
if (
152-
attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined &&
153-
attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] != undefined
154-
) {
150+
if (attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined) {
155151
attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
152+
// eslint-disable-next-line @typescript-eslint/no-dynamic-delete
153+
delete attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
156154
}
157155
if (
158-
typeof attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] == 'number' &&
159-
typeof attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] == 'number'
156+
typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
157+
typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
160158
) {
161159
attributes['gen_ai.usage.total_tokens'] =
162-
attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] + attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
160+
attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
163161
}
164162
}
165163
}

0 commit comments

Comments (0)