Skip to content

Commit ff0b0d6

Browse files
npiggin authored and mpe committed
powerpc/64s/interrupt: handle MSR EE and RI in interrupt entry wrapper
The mtmsrd to enable MSR[RI] can be combined with the mtmsrd to enable MSR[EE] in interrupt entry code, for those interrupts which enable EE. This helps performance of important synchronous interrupts (e.g., page faults). This is similar to what commit dd152f7 ("powerpc/64s: system call avoid setting MSR[RI] until we set MSR[EE]") does for system calls. Do this by enabling EE and RI together at the beginning of the entry wrapper if PACA_IRQ_HARD_DIS is clear, and only enabling RI if it is set. Asynchronous interrupts set PACA_IRQ_HARD_DIS, but synchronous ones leave it unchanged, so by default they always get EE=1 unless they have interrupted a caller that is hard disabled. When the sync interrupt later calls interrupt_cond_local_irq_enable(), it will not require another mtmsrd because MSR[EE] was already enabled here. This avoids one mtmsrd L=1 for synchronous interrupts on 64s, which saves about 20 cycles on POWER9. And for kernel-mode interrupts, both synchronous and asynchronous, this saves an additional 40 cycles due to the mtmsrd being moved ahead of mfspr SPRN_AMR, which prevents a SPR scoreboard stall. Signed-off-by: Nicholas Piggin <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 4423eb5 commit ff0b0d6

File tree

4 files changed

+42
-38
lines changed

4 files changed

+42
-38
lines changed

arch/powerpc/include/asm/interrupt.h

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -149,8 +149,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
149149
#endif
150150

151151
#ifdef CONFIG_PPC64
152-
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
153-
trace_hardirqs_off();
152+
bool trace_enable = false;
153+
154+
if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
155+
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
156+
trace_enable = true;
157+
} else {
158+
irq_soft_mask_set(IRQS_ALL_DISABLED);
159+
}
154160

155161
/*
156162
* If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
@@ -164,8 +170,14 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
164170
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
165171
BUG_ON(!(regs->msr & MSR_EE));
166172
__hard_irq_enable();
173+
} else {
174+
__hard_RI_enable();
167175
}
168176

177+
/* Do this when RI=1 because it can cause SLB faults */
178+
if (trace_enable)
179+
trace_hardirqs_off();
180+
169181
if (user_mode(regs)) {
170182
kuap_lock();
171183
CT_WARN_ON(ct_state() != CONTEXT_USER);
@@ -220,13 +232,16 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct in
220232
/* Ensure interrupt_enter_prepare does not enable MSR[EE] */
221233
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
222234
#endif
235+
interrupt_enter_prepare(regs, state);
223236
#ifdef CONFIG_PPC_BOOK3S_64
237+
/*
238+
* RI=1 is set by interrupt_enter_prepare, so this thread flags access
239+
* has to come afterward (it can cause SLB faults).
240+
*/
224241
if (cpu_has_feature(CPU_FTR_CTRL) &&
225242
!test_thread_local_flags(_TLF_RUNLATCH))
226243
__ppc64_runlatch_on();
227244
#endif
228-
229-
interrupt_enter_prepare(regs, state);
230245
irq_enter();
231246
}
232247

@@ -296,6 +311,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
296311
regs->softe = IRQS_ALL_DISABLED;
297312
}
298313

314+
__hard_RI_enable();
315+
299316
/* Don't do any per-CPU operations until interrupt state is fixed */
300317

301318
if (nmi_disables_ftrace(regs)) {
@@ -393,6 +410,8 @@ interrupt_handler long func(struct pt_regs *regs) \
393410
{ \
394411
long ret; \
395412
\
413+
__hard_RI_enable(); \
414+
\
396415
ret = ____##func (regs); \
397416
\
398417
return ret; \

arch/powerpc/kernel/exceptions-64s.S

Lines changed: 4 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,6 @@ name:
113113
#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
114114
#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
115115
#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
116-
#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */
117116
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
118117
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
119118
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
@@ -157,9 +156,6 @@ do_define_int n
157156
.ifndef IDSISR
158157
IDSISR=0
159158
.endif
160-
.ifndef ISET_RI
161-
ISET_RI=1
162-
.endif
163159
.ifndef IBRANCH_TO_COMMON
164160
IBRANCH_TO_COMMON=1
165161
.endif
@@ -512,11 +508,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
512508
stb r10,PACASRR_VALID(r13)
513509
.endif
514510

515-
.if ISET_RI
516-
li r10,MSR_RI
517-
mtmsrd r10,1 /* Set MSR_RI */
518-
.endif
519-
520511
.if ISTACK
521512
.if IKUAP
522513
kuap_save_amr_and_lock r9, r10, cr1, cr0
@@ -900,11 +891,6 @@ INT_DEFINE_BEGIN(system_reset)
900891
IVEC=0x100
901892
IAREA=PACA_EXNMI
902893
IVIRT=0 /* no virt entry point */
903-
/*
904-
* MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
905-
* being used, so a nested NMI exception would corrupt it.
906-
*/
907-
ISET_RI=0
908894
ISTACK=0
909895
IKVM_REAL=1
910896
INT_DEFINE_END(system_reset)
@@ -977,16 +963,14 @@ TRAMP_REAL_BEGIN(system_reset_fwnmi)
977963
EXC_COMMON_BEGIN(system_reset_common)
978964
__GEN_COMMON_ENTRY system_reset
979965
/*
980-
* Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
981-
* to recover, but nested NMI will notice in_nmi and not recover
982-
* because of the use of the NMI stack. in_nmi reentrancy is tested in
983-
* system_reset_exception.
966+
* Increment paca->in_nmi. When the interrupt entry wrapper later
967+
* enable MSR_RI, then SLB or MCE will be able to recover, but a nested
968+
* NMI will notice in_nmi and not recover because of the use of the NMI
969+
* stack. in_nmi reentrancy is tested in system_reset_exception.
984970
*/
985971
lhz r10,PACA_IN_NMI(r13)
986972
addi r10,r10,1
987973
sth r10,PACA_IN_NMI(r13)
988-
li r10,MSR_RI
989-
mtmsrd r10,1
990974

991975
mr r10,r1
992976
ld r1,PACA_NMI_EMERG_SP(r13)
@@ -1060,12 +1044,6 @@ INT_DEFINE_BEGIN(machine_check_early)
10601044
IAREA=PACA_EXMC
10611045
IVIRT=0 /* no virt entry point */
10621046
IREALMODE_COMMON=1
1063-
/*
1064-
* MSR_RI is not enabled, because PACA_EXMC is being used, so a
1065-
* nested machine check corrupts it. machine_check_common enables
1066-
* MSR_RI.
1067-
*/
1068-
ISET_RI=0
10691047
ISTACK=0
10701048
IDAR=1
10711049
IDSISR=1
@@ -1076,7 +1054,6 @@ INT_DEFINE_BEGIN(machine_check)
10761054
IVEC=0x200
10771055
IAREA=PACA_EXMC
10781056
IVIRT=0 /* no virt entry point */
1079-
ISET_RI=0
10801057
IDAR=1
10811058
IDSISR=1
10821059
IKVM_REAL=1
@@ -1146,9 +1123,6 @@ EXC_COMMON_BEGIN(machine_check_early_common)
11461123
BEGIN_FTR_SECTION
11471124
bl enable_machine_check
11481125
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1149-
li r10,MSR_RI
1150-
mtmsrd r10,1
1151-
11521126
addi r3,r1,STACK_FRAME_OVERHEAD
11531127
bl machine_check_early
11541128
std r3,RESULT(r1) /* Save result */
@@ -1236,10 +1210,6 @@ EXC_COMMON_BEGIN(machine_check_common)
12361210
* save area: PACA_EXMC instead of PACA_EXGEN.
12371211
*/
12381212
GEN_COMMON machine_check
1239-
1240-
/* Enable MSR_RI when finished with PACA_EXMC */
1241-
li r10,MSR_RI
1242-
mtmsrd r10,1
12431213
addi r3,r1,STACK_FRAME_OVERHEAD
12441214
bl machine_check_exception_async
12451215
b interrupt_return_srr

arch/powerpc/kernel/fpu.S

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,12 @@ EXPORT_SYMBOL(store_fp_state)
8181
*/
8282
_GLOBAL(load_up_fpu)
8383
mfmsr r5
84+
#ifdef CONFIG_PPC_BOOK3S_64
85+
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
86+
ori r5,r5,MSR_FP|MSR_RI
87+
#else
8488
ori r5,r5,MSR_FP
89+
#endif
8590
#ifdef CONFIG_VSX
8691
BEGIN_FTR_SECTION
8792
oris r5,r5,MSR_VSX@h

arch/powerpc/kernel/vector.S

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,10 @@ EXPORT_SYMBOL(store_vr_state)
4747
*/
4848
_GLOBAL(load_up_altivec)
4949
mfmsr r5 /* grab the current MSR */
50+
#ifdef CONFIG_PPC_BOOK3S_64
51+
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
52+
ori r5,r5,MSR_RI
53+
#endif
5054
oris r5,r5,MSR_VEC@h
5155
MTMSRD(r5) /* enable use of AltiVec now */
5256
isync
@@ -126,6 +130,12 @@ _GLOBAL(load_up_vsx)
126130
andis. r5,r12,MSR_VEC@h
127131
beql+ load_up_altivec /* skip if already loaded */
128132

133+
#ifdef CONFIG_PPC_BOOK3S_64
134+
/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
135+
li r5,MSR_RI
136+
mtmsrd r5,1
137+
#endif
138+
129139
ld r4,PACACURRENT(r13)
130140
addi r4,r4,THREAD /* Get THREAD */
131141
li r6,1

0 commit comments

Comments (0)