@@ -41,15 +41,26 @@
 
 #ifdef CONFIG_CONTEXT_TRACKING
 /* Called on entry from user mode with IRQs off. */
-__visible inline noinstr void enter_from_user_mode(void)
+__visible noinstr void enter_from_user_mode(void)
 {
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	enum ctx_state state = ct_state();
+
 	user_exit_irqoff();
+
+	instrumentation_begin();
+	CT_WARN_ON(state != CONTEXT_USER);
+	instrumentation_end();
 }
 #else
 static inline void enter_from_user_mode(void) {}
 #endif
 
+static noinstr void exit_to_user_mode(void)
+{
+	user_enter_irqoff();
+	mds_user_clear_cpu_buffers();
+}
+
 static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
 {
 #ifdef CONFIG_X86_64
@@ -179,8 +190,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	}
 }
 
-/* Called with IRQs disabled. */
-__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+static void __prepare_exit_to_usermode(struct pt_regs *regs)
 {
 	struct thread_info *ti = current_thread_info();
 	u32 cached_flags;
@@ -219,10 +229,14 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 	 */
 	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
 #endif
+}
 
-	user_enter_irqoff();
-
-	mds_user_clear_cpu_buffers();
+__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	__prepare_exit_to_usermode(regs);
+	instrumentation_end();
+	exit_to_user_mode();
 }
 
 #define SYSCALL_EXIT_WORK_FLAGS \
@@ -251,11 +265,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
 		tracehook_report_syscall_exit(regs, step);
 }
 
-/*
- * Called with IRQs on and fully valid regs. Returns with IRQs off in a
- * state such that we can immediately switch to user mode.
- */
-__visible inline void syscall_return_slowpath(struct pt_regs *regs)
+static void __syscall_return_slowpath(struct pt_regs *regs)
 {
 	struct thread_info *ti = current_thread_info();
 	u32 cached_flags = READ_ONCE(ti->flags);
@@ -276,15 +286,29 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
 		syscall_slow_exit_work(regs, cached_flags);
 
 	local_irq_disable();
-	prepare_exit_to_usermode(regs);
+	__prepare_exit_to_usermode(regs);
+}
+
+/*
+ * Called with IRQs on and fully valid regs. Returns with IRQs off in a
+ * state such that we can immediately switch to user mode.
+ */
+__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
+{
+	instrumentation_begin();
+	__syscall_return_slowpath(regs);
+	instrumentation_end();
+	exit_to_user_mode();
 }
 
 #ifdef CONFIG_X86_64
-__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
 	struct thread_info *ti;
 
 	enter_from_user_mode();
+	instrumentation_begin();
+
 	local_irq_enable();
 	ti = current_thread_info();
 	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
@@ -301,8 +325,10 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 		regs->ax = x32_sys_call_table[nr](regs);
 #endif
 	}
+	__syscall_return_slowpath(regs);
 
-	syscall_return_slowpath(regs);
+	instrumentation_end();
+	exit_to_user_mode();
 }
 #endif
 
@@ -313,7 +339,7 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
  * extremely hot in workloads that use it, and it's usually called from
  * do_fast_syscall_32, so forcibly inline it to improve performance.
  */
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+static void do_syscall_32_irqs_on(struct pt_regs *regs)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned int nr = (unsigned int)regs->orig_ax;
@@ -337,27 +363,62 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
 		regs->ax = ia32_sys_call_table[nr](regs);
 	}
 
-	syscall_return_slowpath(regs);
+	__syscall_return_slowpath(regs);
 }
 
 /* Handles int $0x80 */
-__visible void do_int80_syscall_32(struct pt_regs *regs)
+__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
 	enter_from_user_mode();
+	instrumentation_begin();
+
 	local_irq_enable();
 	do_syscall_32_irqs_on(regs);
+
+	instrumentation_end();
+	exit_to_user_mode();
+}
+
+static bool __do_fast_syscall_32(struct pt_regs *regs)
+{
+	int res;
+
+	/* Fetch EBP from where the vDSO stashed it. */
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/*
+		 * Micro-optimization: the pointer we're following is
+		 * explicitly 32 bits, so it can't be out of range.
+		 */
+		res = __get_user(*(u32 *)&regs->bp,
+			 (u32 __user __force *)(unsigned long)(u32)regs->sp);
+	} else {
+		res = get_user(*(u32 *)&regs->bp,
+		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
+	}
+
+	if (res) {
+		/* User code screwed up. */
+		regs->ax = -EFAULT;
+		local_irq_disable();
+		__prepare_exit_to_usermode(regs);
+		return false;
+	}
+
+	/* Now this is just like a normal syscall. */
+	do_syscall_32_irqs_on(regs);
+	return true;
 }
 
 /* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
-__visible long do_fast_syscall_32(struct pt_regs *regs)
+__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
 {
 	/*
 	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
 	 * convention. Adjust regs so it looks like we entered using int80.
 	 */
-
 	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
-		vdso_image_32.sym_int80_landing_pad;
+					vdso_image_32.sym_int80_landing_pad;
+	bool success;
 
 	/*
 	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
@@ -367,33 +428,17 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
 	regs->ip = landing_pad;
 
 	enter_from_user_mode();
+	instrumentation_begin();
 
 	local_irq_enable();
+	success = __do_fast_syscall_32(regs);
 
-	/* Fetch EBP from where the vDSO stashed it. */
-	if (
-#ifdef CONFIG_X86_64
-		/*
-		 * Micro-optimization: the pointer we're following is explicitly
-		 * 32 bits, so it can't be out of range.
-		 */
-		__get_user(*(u32 *)&regs->bp,
-			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#else
-		get_user(*(u32 *)&regs->bp,
-			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#endif
-		) {
-
-		/* User code screwed up. */
-		local_irq_disable();
-		regs->ax = -EFAULT;
-		prepare_exit_to_usermode(regs);
-		return 0;	/* Keep it simple: use IRET. */
-	}
+	instrumentation_end();
+	exit_to_user_mode();
 
-	/* Now this is just like a normal syscall. */
-	do_syscall_32_irqs_on(regs);
+	/* If it failed, keep it simple: use IRET. */
+	if (!success)
+		return 0;
 
 #ifdef CONFIG_X86_64
 	/*
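
The recurring shape in this patch is worth seeing in isolation: every noinstr entry point performs the fragile context-tracking transition first, and only then opens an instrumentation_begin()/instrumentation_end() window in which ordinary, traceable C code may run. Below is a minimal user-space sketch of that bracketing pattern. Only the function shapes mirror the patch; the macro bodies and printing stubs are invented stand-ins for the real kernel primitives, so this compiles and runs anywhere but is illustrative, not the kernel implementation.

#include <stdio.h>

#define noinstr                 /* kernel: noinline + .noinstr.text + no-sanitize attrs */
#define instrumentation_begin() printf("  [instrumentable window opens]\n")
#define instrumentation_end()   printf("  [instrumentable window closes]\n")

/* Printing stubs standing in for the real context-tracking/MDS helpers. */
static void user_exit_irqoff(void)  { printf("  context tracking: user -> kernel\n"); }
static void user_enter_irqoff(void) { printf("  context tracking: kernel -> user\n"); }
static void mds_user_clear_cpu_buffers(void) { printf("  clear CPU buffers\n"); }
static void instrumentable_work(void) { printf("  syscall body, tracing allowed\n"); }

/* Shape of enter_from_user_mode() after the patch: do the state
 * transition first, then explicitly permit instrumentable code. */
static noinstr void enter_from_user_mode(void)
{
	user_exit_irqoff();
	instrumentation_begin();
	instrumentable_work();	/* e.g. the CT_WARN_ON() sanity check */
	instrumentation_end();
}

/* Shape of exit_to_user_mode(): nothing instrumentable may run here. */
static noinstr void exit_to_user_mode(void)
{
	user_enter_irqoff();
	mds_user_clear_cpu_buffers();
}

int main(void)
{
	puts("entry:");
	enter_from_user_mode();
	puts("exit:");
	exit_to_user_mode();
	return 0;
}

The ordering matters because, before user_exit_irqoff() has run, RCU is not yet watching the CPU, so a tracepoint or breakpoint hit in that region would run in an unsafe state; in the kernel, objtool enforces the invariant at build time by flagging calls out of noinstr sections that are not inside a begin/end window.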