
Commit d35030d

mydea and cursoragent authored
feat(node): Automatically enable vercelAiIntegration when ai module is detected (#16565)
This PR improves the handling of the `ai` instrumentation so that it is always enabled when we detect that the `ai` module is installed. For this, we leverage the `modulesIntegration`. This PR should make usage of the ai instrumentation in Next.js "automatic" again - BUT users will have to specify `experimental_telemetry: { isEnabled: true }` at each call manually for the time being.

---------

Co-authored-by: Cursor Agent <[email protected]>
1 parent 2e4d243 commit d35030d
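As the description notes, span collection for `ai` calls in Next.js still requires a per-call opt-in via `experimental_telemetry`. A minimal sketch of what such a call site could look like; the provider import and model name here are illustrative assumptions and not part of this commit:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider; any AI SDK model works

export async function summarize(input: string): Promise<string> {
  const { text } = await generateText({
    model: openai('gpt-4o-mini'), // illustrative model id
    prompt: `Summarize: ${input}`,
    // Required per call for now so the Sentry vercelAIIntegration records a span for it:
    experimental_telemetry: { isEnabled: true },
  });
  return text;
}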

File tree

6 files changed: +202 -6 lines changed
Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';
import { z } from 'zod';
import * as Sentry from '@sentry/nextjs';

export const dynamic = 'force-dynamic';

async function runAITest() {
  // First span - telemetry should be enabled automatically but no input/output recorded when sendDefaultPii: true
  const result1 = await generateText({
    model: new MockLanguageModelV1({
      doGenerate: async () => ({
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: 'stop',
        usage: { promptTokens: 10, completionTokens: 20 },
        text: 'First span here!',
      }),
    }),
    prompt: 'Where is the first span?',
  });

  // Second span - explicitly enabled telemetry, should record inputs/outputs
  const result2 = await generateText({
    experimental_telemetry: { isEnabled: true },
    model: new MockLanguageModelV1({
      doGenerate: async () => ({
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: 'stop',
        usage: { promptTokens: 10, completionTokens: 20 },
        text: 'Second span here!',
      }),
    }),
    prompt: 'Where is the second span?',
  });

  // Third span - with tool calls and tool results
  const result3 = await generateText({
    model: new MockLanguageModelV1({
      doGenerate: async () => ({
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: 'tool-calls',
        usage: { promptTokens: 15, completionTokens: 25 },
        text: 'Tool call completed!',
        toolCalls: [
          {
            toolCallType: 'function',
            toolCallId: 'call-1',
            toolName: 'getWeather',
            args: '{ "location": "San Francisco" }',
          },
        ],
      }),
    }),
    tools: {
      getWeather: {
        parameters: z.object({ location: z.string() }),
        execute: async (args) => {
          return `Weather in ${args.location}: Sunny, 72°F`;
        },
      },
    },
    prompt: 'What is the weather in San Francisco?',
  });

  // Fourth span - explicitly disabled telemetry, should not be captured
  const result4 = await generateText({
    experimental_telemetry: { isEnabled: false },
    model: new MockLanguageModelV1({
      doGenerate: async () => ({
        rawCall: { rawPrompt: null, rawSettings: {} },
        finishReason: 'stop',
        usage: { promptTokens: 10, completionTokens: 20 },
        text: 'Third span here!',
      }),
    }),
    prompt: 'Where is the third span?',
  });

  return {
    result1: result1.text,
    result2: result2.text,
    result3: result3.text,
    result4: result4.text,
  };
}

export default async function Page() {
  const results = await Sentry.startSpan(
    { op: 'function', name: 'ai-test' },
    async () => {
      return await runAITest();
    }
  );

  return (
    <div>
      <h1>AI Test Results</h1>
      <pre id="ai-results">{JSON.stringify(results, null, 2)}</pre>
    </div>
  );
}

dev-packages/e2e-tests/test-applications/nextjs-15/package.json

Lines changed: 3 additions & 1 deletion
@@ -18,10 +18,12 @@
     "@types/node": "^18.19.1",
     "@types/react": "18.0.26",
     "@types/react-dom": "18.0.9",
+    "ai": "^3.0.0",
     "next": "15.3.0-canary.33",
     "react": "beta",
     "react-dom": "beta",
-    "typescript": "~5.0.0"
+    "typescript": "~5.0.0",
+    "zod": "^3.22.4"
   },
   "devDependencies": {
     "@playwright/test": "~1.50.0",

dev-packages/e2e-tests/test-applications/nextjs-15/sentry.server.config.ts

Lines changed: 3 additions & 0 deletions
@@ -10,4 +10,7 @@ Sentry.init({
     // We are doing a lot of events at once in this test
     bufferSize: 1000,
   },
+  integrations: [
+    Sentry.vercelAIIntegration(),
+  ],
 });
Lines changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
import { expect, test } from '@playwright/test';
import { waitForTransaction } from '@sentry-internal/test-utils';

test('should create AI spans with correct attributes', async ({ page }) => {
  const aiTransactionPromise = waitForTransaction('nextjs-15', async transactionEvent => {
    return transactionEvent?.transaction === 'ai-test';
  });

  await page.goto('/ai-test');

  const aiTransaction = await aiTransactionPromise;

  expect(aiTransaction).toBeDefined();
  expect(aiTransaction.contexts?.trace?.op).toBe('function');
  expect(aiTransaction.transaction).toBe('ai-test');

  const spans = aiTransaction.spans || [];

  // We expect spans for the first 3 AI calls (4th is disabled)
  // Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
  // Plus a span for the tool call
  // TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
  // because of this, only spans that are manually opted-in at call time will be captured
  // this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
  const aiPipelineSpans = spans.filter(span => span.op === 'ai.pipeline.generate_text');
  const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_text');
  const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');

  expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
  expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
  expect(toolCallSpans.length).toBeGreaterThanOrEqual(0);

  // First AI call - should have telemetry enabled and record inputs/outputs (sendDefaultPii: true)
  /* const firstPipelineSpan = aiPipelineSpans[0];
  expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
  expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
  expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
  expect(firstPipelineSpan?.data?.['ai.response.text']).toBe('First span here!');
  expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
  expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */

  // Second AI call - explicitly enabled telemetry
  const secondPipelineSpan = aiPipelineSpans[0];
  expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
  expect(secondPipelineSpan?.data?.['ai.response.text']).toContain('Second span here!');

  // Third AI call - with tool calls
  /* const thirdPipelineSpan = aiPipelineSpans[2];
  expect(thirdPipelineSpan?.data?.['ai.response.finishReason']).toBe('tool-calls');
  expect(thirdPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(15);
  expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */

  // Tool call span
  /* const toolSpan = toolCallSpans[0];
  expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
  expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
  expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');
  expect(toolSpan?.data?.['ai.toolCall.result']).toContain('Sunny, 72°F'); */

  // Verify the fourth call was not captured (telemetry disabled)
  const promptsInSpans = spans
    .map(span => span.data?.['ai.prompt'])
    .filter((prompt): prompt is string => prompt !== undefined);
  const hasDisabledPrompt = promptsInSpans.some(prompt => prompt.includes('Where is the third span?'));
  expect(hasDisabledPrompt).toBe(false);

  // Verify results are displayed on the page
  const resultsText = await page.locator('#ai-results').textContent();
  expect(resultsText).toContain('First span here!');
  expect(resultsText).toContain('Second span here!');
  expect(resultsText).toContain('Tool call completed!');
  expect(resultsText).toContain('Third span here!');
});

packages/node/src/integrations/modules.ts

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,6 @@
 import { existsSync, readFileSync } from 'node:fs';
 import { dirname, join } from 'node:path';
 import type { IntegrationFn } from '@sentry/core';
-import { defineIntegration } from '@sentry/core';
 import { isCjs } from '../utils/commonjs';
 
 type ModuleInfo = Record<string, string>;
@@ -29,6 +28,7 @@ const _modulesIntegration = (() => {
 
       return event;
     },
+    getModules: _getModules,
   };
 }) satisfies IntegrationFn;
 
@@ -39,7 +39,7 @@ const _modulesIntegration = (() => {
  * - They are extracted from the dependencies & devDependencies in the package.json file
  * - They are extracted from the require.cache (CJS only)
  */
-export const modulesIntegration = defineIntegration(_modulesIntegration);
+export const modulesIntegration = _modulesIntegration;
 
 function getRequireCachePaths(): string[] {
   try {

packages/node/src/integrations/tracing/vercelai/index.ts

Lines changed: 20 additions & 3 deletions
@@ -1,9 +1,10 @@
 /* eslint-disable @typescript-eslint/no-dynamic-delete */
 /* eslint-disable complexity */
-import type { IntegrationFn } from '@sentry/core';
+import type { Client, IntegrationFn } from '@sentry/core';
 import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core';
 import { generateInstrumentOnce } from '../../../otel/instrument';
 import { addOriginToSpan } from '../../../utils/addOriginToSpan';
+import type { modulesIntegration } from '../../modules';
 import {
   AI_MODEL_ID_ATTRIBUTE,
   AI_MODEL_PROVIDER_ATTRIBUTE,
@@ -23,6 +24,15 @@ import type { VercelAiOptions } from './types';
 
 export const instrumentVercelAi = generateInstrumentOnce(INTEGRATION_NAME, () => new SentryVercelAiInstrumentation({}));
 
+/**
+ * Determines if the integration should be forced based on environment and package availability.
+ * Returns true if the 'ai' package is available.
+ */
+function shouldForceIntegration(client: Client): boolean {
+  const modules = client.getIntegrationByName<ReturnType<typeof modulesIntegration>>('Modules');
+  return !!modules?.getModules?.()?.ai;
+}
+
 const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
   let instrumentation: undefined | SentryVercelAiInstrumentation;
 
@@ -32,7 +42,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
     setupOnce() {
       instrumentation = instrumentVercelAi();
     },
-    setup(client) {
+    afterAllSetup(client) {
       function registerProcessors(): void {
         client.on('spanStart', span => {
           const { data: attributes, description: name } = spanToJSON(span);
@@ -190,7 +200,11 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
       });
     }
 
-    if (options.force) {
+    // Auto-detect if we should force the integration when running with 'ai' package available
+    // Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode
+    const shouldForce = options.force ?? shouldForceIntegration(client);
+
+    if (shouldForce) {
       registerProcessors();
     } else {
       instrumentation?.callWhenPatched(registerProcessors);
@@ -213,6 +227,9 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
  * });
  * ```
  *
+ * The integration automatically detects when to force registration in CommonJS environments
+ * when the 'ai' package is available. You can still manually set the `force` option if needed.
+ *
  * By default this integration adds tracing support to all `ai` function calls. If you need to disable
  * collecting spans for a specific call, you can do so by setting `experimental_telemetry.isEnabled` to
  * `false` in the first argument of the function call.
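The updated JSDoc above notes that auto-detection relies on the 'Modules' integration (CJS only) and that the `force` option can still be set manually. A minimal sketch of forcing the integration explicitly, for example in an app where module detection is unavailable; the DSN and sample rate are placeholders:

import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: '__PLACEHOLDER_DSN__', // placeholder, not a real DSN
  tracesSampleRate: 1.0,
  integrations: [
    // Skip auto-detection and always register the span processors:
    Sentry.vercelAIIntegration({ force: true }),
  ],
});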
