@@ -8,11 +8,10 @@ import { context, propagation } from '@opentelemetry/api';
 import { VERSION } from '@opentelemetry/core';
 import type { InstrumentationConfig } from '@opentelemetry/instrumentation';
 import { InstrumentationBase, InstrumentationNodeModuleDefinition } from '@opentelemetry/instrumentation';
-import type { AggregationCounts, Client, SanitizedRequestData, Scope } from '@sentry/core';
-import {
-  addBreadcrumb,
+import type { AggregationCounts, Client, SanitizedRequestData, Scope } from '@sentry/core';
+import { addBreadcrumb,
   addNonEnumerableProperty,
-  generateSpanId,
+  flush, generateSpanId,
   getBreadcrumbLogLevelFromHttpStatusCode,
   getClient,
   getCurrentScope,
@@ -22,6 +21,7 @@ import {
   logger,
   parseUrl,
   stripUrlQueryAndFragment,
+  vercelWaitUntil,
   withIsolationScope,
 } from '@sentry/core';
 import { DEBUG_BUILD } from '../../debug-build';
@@ -127,6 +127,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
       this._onOutgoingRequestFinish(data.request, undefined);
     }) satisfies ChannelListener;

+    const onHttpServerResponseCreated = ((_data: unknown) => {
+      const data = _data as { response: http.OutgoingMessage };
+      patchResponseToFlushOnServerlessPlatformsOnce(data.response);
+    }) satisfies ChannelListener;
+
    /**
     * You may be wondering why we register these diagnostics-channel listeners
     * in such a convoluted way (as InstrumentationNodeModuleDefinition...),
@@ -153,6 +158,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
         // In this case, `http.client.response.finish` is not triggered
         subscribe('http.client.request.error', onHttpClientRequestError);

+        // On vercel, ensure that we flush events before the lambda freezes
+        if (process.env.VERCEL) {
+          subscribe('http.server.response.created', onHttpServerResponseCreated);
+        }
+
         return moduleExports;
       },
       () => {
@@ -178,6 +188,11 @@ export class SentryHttpInstrumentation extends InstrumentationBase<SentryHttpIns
         // In this case, `http.client.response.finish` is not triggered
         subscribe('http.client.request.error', onHttpClientRequestError);

+        // On vercel, ensure that we flush events before the lambda freezes
+        if (process.env.VERCEL) {
+          subscribe('http.server.response.created', onHttpServerResponseCreated);
+        }
+
         return moduleExports;
       },
       () => {
@@ -529,6 +544,66 @@ export function recordRequestSession({
   });
 }

+function patchResponseToFlushOnServerlessPlatformsOnce(res: http.OutgoingMessage): void {
+  // This means it was already patched, do nothing
+  if ((res as { __sentry_patched__?: boolean }).__sentry_patched__) {
+    return;
+  }
+
+  DEBUG_BUILD && logger.log(INSTRUMENTATION_NAME, 'Patching server.end()');
+  addNonEnumerableProperty(res, '__sentry_patched__', true);
+
+  // This is vercel specific handling to flush events before the lambda freezes
+
+  // In some cases res.end does not seem to be defined leading to errors if passed to Proxy
+  // https://github.com/getsentry/sentry-javascript/issues/15759
+  if (typeof res.end !== 'function') {
+    return;
+  }
+
+  let markOnEndDone = (): void => undefined;
+  const onEndDonePromise = new Promise<void>(res => {
+    markOnEndDone = res;
+  });
+
+  res.on('close', () => {
+    markOnEndDone();
+  });
+
+  // eslint-disable-next-line @typescript-eslint/unbound-method
+  res.end = new Proxy(res.end, {
+    apply(target, thisArg, argArray) {
+      vercelWaitUntil(
+        new Promise<void>(finishWaitUntil => {
+          // Define a timeout that unblocks the lambda just to be safe so we're not indefinitely keeping it alive, exploding server bills
+          const timeout = setTimeout(() => {
+            finishWaitUntil();
+          }, 2000);
+
+          onEndDonePromise
+            .then(() => {
+              DEBUG_BUILD && logger.log('Flushing events before Vercel Lambda freeze');
+              return flush(2000);
+            })
+            .then(
+              () => {
+                clearTimeout(timeout);
+                finishWaitUntil();
+              },
+              e => {
+                clearTimeout(timeout);
+                DEBUG_BUILD && logger.log('Error while flushing events for Vercel:\n', e);
+                finishWaitUntil();
+              },
+            );
+        }),
+      );
+
+      return target.apply(thisArg, argArray);
+    },
+  });
+}
+
 const clientToRequestSessionAggregatesMap = new Map<
   Client,
   { [timestampRoundedToSeconds: string]: { exited: number; crashed: number; errored: number } }
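
The heart of this change is the Proxy around `res.end`: the original `end()` still runs immediately, while a promise that resolves on the response's 'close' event (capped by a 2-second safety timeout) is handed to `vercelWaitUntil`, so the platform keeps the function alive just long enough to flush buffered events. Below is a minimal, self-contained sketch of that pattern, assuming hypothetical `waitUntil` and `flushTelemetry` helpers in place of the SDK's `vercelWaitUntil` and `flush`; it illustrates the technique only and is not the SDK's actual code.

import * as http from 'node:http';

// Hypothetical stand-in for vercelWaitUntil: on Vercel this would delegate to
// the platform's waitUntil; here it simply ignores the task.
function waitUntil(task: Promise<void>): void {
  void task;
}

// Hypothetical stand-in for flush: pretends to flush buffered telemetry.
function flushTelemetry(timeoutMs: number): Promise<void> {
  return new Promise(resolve => setTimeout(resolve, Math.min(timeoutMs, 10)));
}

function patchEndToFlush(res: http.ServerResponse): void {
  // Resolves once the response has actually gone out over the wire.
  let markDone = (): void => undefined;
  const done = new Promise<void>(resolve => {
    markDone = resolve;
  });
  res.on('close', () => markDone());

  res.end = new Proxy(res.end, {
    apply(target, thisArg, argArray) {
      waitUntil(
        new Promise<void>(finish => {
          // Safety valve: never keep the function alive longer than 2s.
          const timeout = setTimeout(finish, 2000);
          done
            .then(() => flushTelemetry(2000))
            .then(
              () => {
                clearTimeout(timeout);
                finish();
              },
              () => {
                clearTimeout(timeout);
                finish();
              },
            );
        }),
      );
      // Forward to the original end() so the response itself is unaffected.
      return Reflect.apply(target, thisArg, argArray);
    },
  });
}

// Usage: patch each response as it is created.
http
  .createServer((_req, res) => {
    patchEndToFlush(res);
    res.end('ok');
  })
  .listen(0);

Because the flush is scheduled via waitUntil rather than awaited inside end(), the response is never delayed; the flush happens after end() returns, during the window the platform grants before freezing the lambda.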