Commit b037b09

amluto authored and KAGA-KOKO committed
x86/entry: Rename idtentry_enter/exit_cond_rcu() to idtentry_enter/exit()
They were originally called _cond_rcu because they were special versions
with conditional RCU handling. Now they're the standard entry and exit
path, so the _cond_rcu part is just confusing. Drop it.

Also change the signature to make them more extensible and more foolproof.

No functional change -- it's pure refactoring.

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lkml.kernel.org/r/247fc67685263e0b673e1d7f808182d28ff80359.1593795633.git.luto@kernel.org
1 parent dcb7fd8 commit b037b09

File tree: 5 files changed, +53 -43 lines

  arch/x86/entry/common.c
  arch/x86/include/asm/idtentry.h
  arch/x86/kernel/kvm.c
  arch/x86/kernel/traps.c
  arch/x86/mm/fault.c
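Every converted call site in the diffs below follows the same mechanical pattern. As a quick orientation, the change looks roughly like this (illustrative fragment only, not a complete handler):

/* Before: the entry helper returned a bare bool. */
bool rcu_exit = idtentry_enter_cond_rcu(regs);
/* ... exception-specific work ... */
idtentry_exit_cond_rcu(regs, rcu_exit);

/* After: the entry helper returns an opaque state object that is
 * handed back to the paired exit helper. */
idtentry_state_t state = idtentry_enter(regs);
/* ... exception-specific work ... */
idtentry_exit(regs, state);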

arch/x86/entry/common.c

Lines changed: 28 additions & 22 deletions
@@ -559,8 +559,7 @@ SYSCALL_DEFINE0(ni_syscall)
 }
 
 /**
- * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
- *                           RCU handling
+ * idtentry_enter - Handle state tracking on ordinary idtentries
  * @regs:	Pointer to pt_regs of interrupted context
  *
  * Invokes:
@@ -572,6 +571,9 @@ SYSCALL_DEFINE0(ni_syscall)
  *  - The hardirq tracer to keep the state consistent as low level ASM
  *    entry disabled interrupts.
  *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
  * For kernel mode entries RCU handling is done conditional. If RCU is
  * watching then the only RCU requirement is to check whether the tick has
  * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
@@ -585,18 +587,21 @@ SYSCALL_DEFINE0(ni_syscall)
  * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
  * would not be possible.
  *
- * Returns: True if RCU has been adjusted on a kernel entry
- *          False otherwise
+ * Returns: An opaque object that must be passed to idtentry_exit()
  *
- * The return value must be fed into the rcu_exit argument of
- * idtentry_exit_cond_rcu().
+ * The return value must be fed into the state argument of
+ * idtentry_exit().
  */
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+idtentry_state_t noinstr idtentry_enter(struct pt_regs *regs)
 {
+	idtentry_state_t ret = {
+		.exit_rcu = false,
+	};
+
 	if (user_mode(regs)) {
 		check_user_regs(regs);
 		enter_from_user_mode();
-		return false;
+		return ret;
 	}
 
 	/*
@@ -634,7 +639,8 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 		trace_hardirqs_off_finish();
 		instrumentation_end();
 
-		return true;
+		ret.exit_rcu = true;
+		return ret;
 	}
 
 	/*
@@ -649,7 +655,7 @@ bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 	trace_hardirqs_off();
 	instrumentation_end();
 
-	return false;
+	return ret;
 }
 
 static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
@@ -667,10 +673,9 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
 }
 
 /**
- * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
- *                          handling
+ * idtentry_exit - Handle return from exception that used idtentry_enter()
  * @regs:	Pointer to pt_regs (exception entry regs)
- * @rcu_exit:	Invoke rcu_irq_exit() if true
+ * @state:	Return value from matching call to idtentry_enter()
  *
  * Depending on the return target (kernel/user) this runs the necessary
  * preemption and work checks if possible and reguired and returns to
@@ -679,10 +684,10 @@ static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
  * This is the last action before returning to the low level ASM code which
  * just needs to return to the appropriate context.
  *
- * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
- * function must be fed into the @rcu_exit argument.
+ * Counterpart to idtentry_enter(). The return value of the entry
+ * function must be fed into the @state argument.
  */
-void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+void noinstr idtentry_exit(struct pt_regs *regs, idtentry_state_t state)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -695,7 +700,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 		 * carefully and needs the same ordering of lockdep/tracing
 		 * and RCU as the return to user mode path.
 		 */
-		if (rcu_exit) {
+		if (state.exit_rcu) {
 			instrumentation_begin();
 			/* Tell the tracer that IRET will enable interrupts */
 			trace_hardirqs_on_prepare();
@@ -714,7 +719,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
 		 * IRQ flags state is correct already. Just tell RCU if it
 		 * was not watching on entry.
 		 */
-		if (rcu_exit)
+		if (state.exit_rcu)
 			rcu_irq_exit();
 	}
 }
@@ -800,9 +805,10 @@ static void __xen_pv_evtchn_do_upcall(void)
 __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
-	bool inhcall, rcu_exit;
+	bool inhcall;
+	idtentry_state_t state;
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	old_regs = set_irq_regs(regs);
 
 	instrumentation_begin();
@@ -812,13 +818,13 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 
 	inhcall = get_and_clear_inhcall();
-	if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
 		instrumentation_begin();
 		idtentry_exit_cond_resched(regs, true);
 		instrumentation_end();
 		restore_inhcall(inhcall);
 	} else {
-		idtentry_exit_cond_rcu(regs, rcu_exit);
+		idtentry_exit(regs, state);
 	}
 }
 #endif /* CONFIG_XEN_PV */
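The new interface is the familiar "opaque state token" idiom: the enter function returns a small struct by value, and the only valid thing a caller can do with it is hand it back to the paired exit function. A minimal userspace sketch of the idiom (assumed names, not kernel code) looks like this:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for idtentry_state_t: callers treat it as opaque. */
typedef struct demo_state {
	bool exit_rcu;
} demo_state_t;

static demo_state_t demo_enter(bool kernel_mode_without_rcu)
{
	demo_state_t ret = { .exit_rcu = false };

	/* Record on entry whatever the paired exit will need to undo. */
	if (kernel_mode_without_rcu)
		ret.exit_rcu = true;
	return ret;
}

static void demo_exit(demo_state_t state)
{
	/* The exit side consumes the token; it cannot be confused with an
	 * unrelated bool the way the old rcu_exit flag could be. */
	if (state.exit_rcu)
		printf("undoing entry-side adjustment\n");
}

int main(void)
{
	demo_state_t state = demo_enter(true);
	/* ... exception work would happen here ... */
	demo_exit(state);
	return 0;
}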

arch/x86/include/asm/idtentry.h

Lines changed: 16 additions & 12 deletions
@@ -13,8 +13,12 @@
 void idtentry_enter_user(struct pt_regs *regs);
 void idtentry_exit_user(struct pt_regs *regs);
 
-bool idtentry_enter_cond_rcu(struct pt_regs *regs);
-void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+typedef struct idtentry_state {
+	bool exit_rcu;
+} idtentry_state_t;
+
+idtentry_state_t idtentry_enter(struct pt_regs *regs);
+void idtentry_exit(struct pt_regs *regs, idtentry_state_t state);
 
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
@@ -54,12 +58,12 @@ static __always_inline void __##func(struct pt_regs *regs);		\
 									\
 __visible noinstr void func(struct pt_regs *regs)			\
 {									\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);			\
+	idtentry_state_t state = idtentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
 	__##func (regs);						\
 	instrumentation_end();						\
-	idtentry_exit_cond_rcu(regs, rcu_exit);				\
+	idtentry_exit(regs, state);					\
 }									\
 									\
 static __always_inline void __##func(struct pt_regs *regs)
@@ -101,12 +105,12 @@ static __always_inline void __##func(struct pt_regs *regs,		\
 __visible noinstr void func(struct pt_regs *regs,			\
 			    unsigned long error_code)			\
 {									\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);			\
+	idtentry_state_t state = idtentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
 	__##func (regs, error_code);					\
 	instrumentation_end();						\
-	idtentry_exit_cond_rcu(regs, rcu_exit);				\
+	idtentry_exit(regs, state);					\
 }									\
 									\
 static __always_inline void __##func(struct pt_regs *regs,		\
@@ -199,15 +203,15 @@ static __always_inline void __##func(struct pt_regs *regs, u8 vector);	\
 __visible noinstr void func(struct pt_regs *regs,			\
 			    unsigned long error_code)			\
 {									\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);			\
+	idtentry_state_t state = idtentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
 	__##func (regs, (u8)error_code);				\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
-	idtentry_exit_cond_rcu(regs, rcu_exit);				\
+	idtentry_exit(regs, state);					\
 }									\
 									\
 static __always_inline void __##func(struct pt_regs *regs, u8 vector)
@@ -241,15 +245,15 @@ static void __##func(struct pt_regs *regs);				\
 									\
 __visible noinstr void func(struct pt_regs *regs)			\
 {									\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);			\
+	idtentry_state_t state = idtentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
 	irq_enter_rcu();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
 	run_on_irqstack_cond(__##func, regs, regs);			\
 	irq_exit_rcu();							\
 	instrumentation_end();						\
-	idtentry_exit_cond_rcu(regs, rcu_exit);				\
+	idtentry_exit(regs, state);					\
 }									\
 									\
 static noinline void __##func(struct pt_regs *regs)
@@ -270,15 +274,15 @@ static __always_inline void __##func(struct pt_regs *regs);		\
 									\
 __visible noinstr void func(struct pt_regs *regs)			\
 {									\
-	bool rcu_exit = idtentry_enter_cond_rcu(regs);			\
+	idtentry_state_t state = idtentry_enter(regs);			\
 									\
 	instrumentation_begin();					\
 	__irq_enter_raw();						\
 	kvm_set_cpu_l1tf_flush_l1d();					\
 	__##func (regs);						\
 	__irq_exit_raw();						\
 	instrumentation_end();						\
-	idtentry_exit_cond_rcu(regs, rcu_exit);				\
+	idtentry_exit(regs, state);					\
 }									\
 									\
 static __always_inline void __##func(struct pt_regs *regs)
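Taken together with the common.c changes, a simple wrapper generated by DEFINE_IDTENTRY now expands to roughly the following (exc_example is a hypothetical handler name, whitespace simplified):

__visible noinstr void exc_example(struct pt_regs *regs)
{
	/* Establish lockdep/tracing/RCU state and get the opaque token. */
	idtentry_state_t state = idtentry_enter(regs);

	instrumentation_begin();
	__exc_example(regs);		/* the instrumentable handler body */
	instrumentation_end();

	/* Undo exactly what idtentry_enter() set up for this entry. */
	idtentry_exit(regs, state);
}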

arch/x86/kernel/kvm.c

Lines changed: 3 additions & 3 deletions
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
 	u32 reason = kvm_read_and_reset_apf_flags();
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	switch (reason) {
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -243,7 +243,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 		return false;
 	}
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	instrumentation_begin();
 
 	/*
@@ -264,7 +264,7 @@ noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 	}
 
 	instrumentation_end();
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 	return true;
 }
 

arch/x86/kernel/traps.c

Lines changed: 3 additions & 3 deletions
@@ -245,7 +245,7 @@ static noinstr bool handle_bug(struct pt_regs *regs)
 
 DEFINE_IDTENTRY_RAW(exc_invalid_op)
 {
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	/*
 	 * We use UD2 as a short encoding for 'CALL __WARN', as such
@@ -255,11 +255,11 @@ DEFINE_IDTENTRY_RAW(exc_invalid_op)
 	if (!user_mode(regs) && handle_bug(regs))
 		return;
 
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 	instrumentation_begin();
 	handle_invalid_op(regs);
 	instrumentation_end();
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 }
 
 DEFINE_IDTENTRY(exc_coproc_segment_overrun)
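As the open-coded calls in traps.c, kvm.c and fault.c show, RAW idtentries get no enter/exit calls from the macro wrapper; the handler body owns them, which is why exc_invalid_op can bail out via handle_bug() before any state tracking is established. A condensed sketch of that contract (exc_demo_raw is a hypothetical handler name, not part of this commit):

DEFINE_IDTENTRY_RAW(exc_demo_raw)
{
	idtentry_state_t state;

	/* Work that must run before state tracking goes here. */

	state = idtentry_enter(regs);
	instrumentation_begin();
	/* ... exception-specific work ... */
	instrumentation_end();
	idtentry_exit(regs, state);
}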

arch/x86/mm/fault.c

Lines changed: 3 additions & 3 deletions
@@ -1377,7 +1377,7 @@ handle_page_fault(struct pt_regs *regs, unsigned long error_code,
 DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
 {
 	unsigned long address = read_cr2();
-	bool rcu_exit;
+	idtentry_state_t state;
 
 	prefetchw(&current->mm->mmap_lock);
 
@@ -1412,11 +1412,11 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
 	 * code reenabled RCU to avoid subsequent wreckage which helps
 	 * debugability.
 	 */
-	rcu_exit = idtentry_enter_cond_rcu(regs);
+	state = idtentry_enter(regs);
 
 	instrumentation_begin();
 	handle_page_fault(regs, error_code, address);
 	instrumentation_end();
 
-	idtentry_exit_cond_rcu(regs, rcu_exit);
+	idtentry_exit(regs, state);
 }
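The struct-based signature is what the changelog means by "more extensible": a later change could grow idtentry_state_t with another field without touching any of the call sites converted above, since they only pass the token through. A purely hypothetical illustration (the demo_flag field is invented for this example and is not part of this commit):

/* Hypothetical future extension of the state token. */
typedef struct idtentry_state {
	bool exit_rcu;
	bool demo_flag;		/* invented field for illustration */
} idtentry_state_t;

/* Only idtentry_enter() and idtentry_exit() would need to learn about
 * demo_flag; every converted handler keeps passing the struct unchanged. */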
