Skip to content

feat(node): Automatically enable vercelAiIntegration when ai module is detected #16565

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Jun 13, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';
import { z } from 'zod';
import * as Sentry from '@sentry/nextjs';

export const dynamic = 'force-dynamic';

async function runAITest() {
// First span - telemetry should be enabled automatically but no input/output recorded when sendDefaultPii: true
const result1 = await generateText({
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'First span here!',
}),
}),
prompt: 'Where is the first span?',
});

// Second span - explicitly enabled telemetry, should record inputs/outputs
const result2 = await generateText({
experimental_telemetry: { isEnabled: true },
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'Second span here!',
}),
}),
prompt: 'Where is the second span?',
});

// Third span - with tool calls and tool results
const result3 = await generateText({
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'tool-calls',
usage: { promptTokens: 15, completionTokens: 25 },
text: 'Tool call completed!',
toolCalls: [
{
toolCallType: 'function',
toolCallId: 'call-1',
toolName: 'getWeather',
args: '{ "location": "San Francisco" }',
},
],
}),
}),
tools: {
getWeather: {
parameters: z.object({ location: z.string() }),
execute: async (args) => {
return `Weather in ${args.location}: Sunny, 72°F`;
},
},
},
prompt: 'What is the weather in San Francisco?',
});

// Fourth span - explicitly disabled telemetry, should not be captured
const result4 = await generateText({
experimental_telemetry: { isEnabled: false },
model: new MockLanguageModelV1({
doGenerate: async () => ({
rawCall: { rawPrompt: null, rawSettings: {} },
finishReason: 'stop',
usage: { promptTokens: 10, completionTokens: 20 },
text: 'Third span here!',
}),
}),
prompt: 'Where is the third span?',
});

return {
result1: result1.text,
result2: result2.text,
result3: result3.text,
result4: result4.text,
};
}

export default async function Page() {
const results = await Sentry.startSpan(
{ op: 'function', name: 'ai-test' },
async () => {
return await runAITest();
}
);

return (
<div>
<h1>AI Test Results</h1>
<pre id="ai-results">{JSON.stringify(results, null, 2)}</pre>
</div>
);
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,12 @@
"@types/node": "^18.19.1",
"@types/react": "18.0.26",
"@types/react-dom": "18.0.9",
"ai": "^3.0.0",
"next": "15.3.0-canary.33",
"react": "beta",
"react-dom": "beta",
"typescript": "~5.0.0"
"typescript": "~5.0.0",
"zod": "^3.22.4"
},
"devDependencies": {
"@playwright/test": "~1.50.0",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,4 +10,7 @@ Sentry.init({
// We are doing a lot of events at once in this test
bufferSize: 1000,
},
integrations: [
Sentry.vercelAIIntegration(),
],
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
import { expect, test } from '@playwright/test';
import { waitForTransaction } from '@sentry-internal/test-utils';

test('should create AI spans with correct attributes', async ({ page }) => {
const aiTransactionPromise = waitForTransaction('nextjs-15', async transactionEvent => {
return transactionEvent?.transaction === 'ai-test';
});

await page.goto('/ai-test');

const aiTransaction = await aiTransactionPromise;

expect(aiTransaction).toBeDefined();
expect(aiTransaction.contexts?.trace?.op).toBe('function');
expect(aiTransaction.transaction).toBe('ai-test');

const spans = aiTransaction.spans || [];

// We expect spans for the first 3 AI calls (4th is disabled)
// Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
// Plus a span for the tool call
// TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
// because of this, only spans that are manually opted-in at call time will be captured
// this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
const aiPipelineSpans = spans.filter(span => span.op === 'ai.pipeline.generate_text');
const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_text');
const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');

expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
expect(toolCallSpans.length).toBeGreaterThanOrEqual(0);

// First AI call - should have telemetry enabled and record inputs/outputs (sendDefaultPii: true)
/* const firstPipelineSpan = aiPipelineSpans[0];
expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
expect(firstPipelineSpan?.data?.['ai.response.text']).toBe('First span here!');
expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */

// Second AI call - explicitly enabled telemetry
const secondPipelineSpan = aiPipelineSpans[0];
expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
expect(secondPipelineSpan?.data?.['ai.response.text']).toContain('Second span here!');

// Third AI call - with tool calls
/* const thirdPipelineSpan = aiPipelineSpans[2];
expect(thirdPipelineSpan?.data?.['ai.response.finishReason']).toBe('tool-calls');
expect(thirdPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(15);
expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */

// Tool call span
/* const toolSpan = toolCallSpans[0];
expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');
expect(toolSpan?.data?.['ai.toolCall.result']).toContain('Sunny, 72°F'); */

// Verify the fourth call was not captured (telemetry disabled)
const promptsInSpans = spans
.map(span => span.data?.['ai.prompt'])
.filter((prompt): prompt is string => prompt !== undefined);
const hasDisabledPrompt = promptsInSpans.some(prompt => prompt.includes('Where is the third span?'));
expect(hasDisabledPrompt).toBe(false);

// Verify results are displayed on the page
const resultsText = await page.locator('#ai-results').textContent();
expect(resultsText).toContain('First span here!');
expect(resultsText).toContain('Second span here!');
expect(resultsText).toContain('Tool call completed!');
expect(resultsText).toContain('Third span here!');
});
4 changes: 2 additions & 2 deletions packages/node/src/integrations/modules.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import { existsSync, readFileSync } from 'node:fs';
import { dirname, join } from 'node:path';
import type { IntegrationFn } from '@sentry/core';
import { defineIntegration } from '@sentry/core';
import { isCjs } from '../utils/commonjs';

type ModuleInfo = Record<string, string>;
Expand Down Expand Up @@ -29,6 +28,7 @@ const _modulesIntegration = (() => {

return event;
},
getModules: _getModules,
};
}) satisfies IntegrationFn;

Expand All @@ -39,7 +39,7 @@ const _modulesIntegration = (() => {
* - They are extracted from the dependencies & devDependencies in the package.json file
* - They are extracted from the require.cache (CJS only)
*/
export const modulesIntegration = defineIntegration(_modulesIntegration);
export const modulesIntegration = _modulesIntegration;

function getRequireCachePaths(): string[] {
try {
Expand Down
23 changes: 20 additions & 3 deletions packages/node/src/integrations/tracing/vercelai/index.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
/* eslint-disable @typescript-eslint/no-dynamic-delete */
/* eslint-disable complexity */
import type { IntegrationFn } from '@sentry/core';
import type { Client, IntegrationFn } from '@sentry/core';
import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core';
import { generateInstrumentOnce } from '../../../otel/instrument';
import { addOriginToSpan } from '../../../utils/addOriginToSpan';
import type { modulesIntegration } from '../../modules';
import {
AI_MODEL_ID_ATTRIBUTE,
AI_MODEL_PROVIDER_ATTRIBUTE,
Expand All @@ -23,6 +24,15 @@ import type { VercelAiOptions } from './types';

export const instrumentVercelAi = generateInstrumentOnce(INTEGRATION_NAME, () => new SentryVercelAiInstrumentation({}));

/**
* Determines if the integration should be forced based on environment and package availability.
* Returns true if the 'ai' package is available.
*/
function shouldForceIntegration(client: Client): boolean {
const modules = client.getIntegrationByName<ReturnType<typeof modulesIntegration>>('Modules');
return !!modules?.getModules?.()?.ai;
}

const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
let instrumentation: undefined | SentryVercelAiInstrumentation;

Expand All @@ -32,7 +42,7 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
setupOnce() {
instrumentation = instrumentVercelAi();
},
setup(client) {
afterAllSetup(client) {
function registerProcessors(): void {
client.on('spanStart', span => {
const { data: attributes, description: name } = spanToJSON(span);
Expand Down Expand Up @@ -190,7 +200,11 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
});
}

if (options.force) {
// Auto-detect if we should force the integration when running with 'ai' package available
// Note that this can only be detected if the 'Modules' integration is available, and running in CJS mode
const shouldForce = options.force ?? shouldForceIntegration(client);

if (shouldForce) {
registerProcessors();
} else {
instrumentation?.callWhenPatched(registerProcessors);
Expand All @@ -213,6 +227,9 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
* });
* ```
*
* The integration automatically detects when to force registration in CommonJS environments
* when the 'ai' package is available. You can still manually set the `force` option if needed.
*
* By default this integration adds tracing support to all `ai` function calls. If you need to disable
* collecting spans for a specific call, you can do so by setting `experimental_telemetry.isEnabled` to
* `false` in the first argument of the function call.
Expand Down
Loading