Skip to content

Commit 59dc5bf

Browse files
npiggin authored and mpe committed
powerpc/64s: avoid reloading (H)SRR registers if they are still valid
When an interrupt is taken, the SRR registers are set to return to where it left off. Unless they are modified in the meantime, or the return address or MSR are modified, there is no need to reload these registers when returning from interrupt. Introduce per-CPU flags that track the validity of SRR and HSRR registers. These are cleared when returning from interrupt, when using the registers for something else (e.g., OPAL calls), when adjusting the return address or MSR of a context, and when context switching (which changes the return address and MSR). This improves the performance of interrupt returns. Signed-off-by: Nicholas Piggin <[email protected]> [mpe: Fold in fixup patch from Nick] Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 1df7d5e commit 59dc5bf

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

47 files changed

+418
-179
lines changed

arch/powerpc/Kconfig.debug

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,10 @@ config MSI_BITMAP_SELFTEST
8585
config PPC_IRQ_SOFT_MASK_DEBUG
8686
bool "Include extra checks for powerpc irq soft masking"
8787

88+
config PPC_RFI_SRR_DEBUG
89+
bool "Include extra checks for RFI SRR register validity"
90+
depends on PPC_BOOK3S_64
91+
8892
config XMON
8993
bool "Include xmon kernel debugger"
9094
depends on DEBUG_KERNEL

arch/powerpc/include/asm/hw_irq.h

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -389,7 +389,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
389389
return !(regs->msr & MSR_EE);
390390
}
391391

392-
static inline void may_hard_irq_enable(void) { }
392+
static inline bool may_hard_irq_enable(void)
393+
{
394+
return false;
395+
}
396+
397+
static inline void do_hard_irq_enable(void)
398+
{
399+
BUILD_BUG();
400+
}
393401

394402
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
395403
{

arch/powerpc/include/asm/interrupt.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,13 +73,25 @@
7373
#include <asm/kprobes.h>
7474
#include <asm/runlatch.h>
7575

76+
#ifdef CONFIG_PPC_BOOK3S_64
77+
static inline void srr_regs_clobbered(void)
78+
{
79+
local_paca->srr_valid = 0;
80+
local_paca->hsrr_valid = 0;
81+
}
82+
#else
83+
static inline void srr_regs_clobbered(void)
84+
{
85+
}
86+
#endif
87+
7688
static inline void nap_adjust_return(struct pt_regs *regs)
7789
{
7890
#ifdef CONFIG_PPC_970_NAP
7991
if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
8092
/* Can avoid a test-and-clear because NMIs do not call this */
8193
clear_thread_local_flags(_TLF_NAPPING);
82-
regs->nip = (unsigned long)power4_idle_nap_return;
94+
regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
8395
}
8496
#endif
8597
}

arch/powerpc/include/asm/livepatch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
1616
{
1717
struct pt_regs *regs = ftrace_get_regs(fregs);
1818

19-
regs->nip = ip;
19+
regs_set_return_ip(regs, ip);
2020
}
2121

2222
#define klp_get_ftrace_location klp_get_ftrace_location

arch/powerpc/include/asm/paca.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,10 @@ struct paca_struct {
167167
u64 saved_msr; /* MSR saved here by enter_rtas */
168168
#ifdef CONFIG_PPC_BOOK3E
169169
u16 trap_save; /* Used when bad stack is encountered */
170+
#endif
171+
#ifdef CONFIG_PPC_BOOK3S_64
172+
u8 hsrr_valid; /* HSRRs set for HRFID */
173+
u8 srr_valid; /* SRRs set for RFID */
170174
#endif
171175
u8 irq_soft_mask; /* mask for irq soft masking */
172176
u8 irq_happened; /* irq happened while soft-disabled */

arch/powerpc/include/asm/probes.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
3434
/* Enable single stepping for the current task */
3535
static inline void enable_single_step(struct pt_regs *regs)
3636
{
37-
regs->msr |= MSR_SINGLESTEP;
37+
regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
3838
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3939
/*
4040
* We turn off Critical Input Exception(CE) to ensure that the single
4141
* step will be for the instruction we have the probe on; if we don't,
4242
* it is possible we'd get the single step reported for CE.
4343
*/
44-
regs->msr &= ~MSR_CE;
44+
regs_set_return_msr(regs, regs->msr & ~MSR_CE);
4545
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
4646
#ifdef CONFIG_PPC_47x
4747
isync();

arch/powerpc/include/asm/ptrace.h

Lines changed: 42 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,47 @@ struct pt_regs
123123
#endif /* __powerpc64__ */
124124

125125
#ifndef __ASSEMBLY__
126+
#include <asm/paca.h>
127+
128+
#ifdef CONFIG_SMP
129+
extern unsigned long profile_pc(struct pt_regs *regs);
130+
#else
131+
#define profile_pc(regs) instruction_pointer(regs)
132+
#endif
133+
134+
long do_syscall_trace_enter(struct pt_regs *regs);
135+
void do_syscall_trace_leave(struct pt_regs *regs);
136+
137+
static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
138+
{
139+
regs->nip = ip;
140+
#ifdef CONFIG_PPC_BOOK3S_64
141+
local_paca->hsrr_valid = 0;
142+
local_paca->srr_valid = 0;
143+
#endif
144+
}
145+
146+
static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
147+
{
148+
regs->msr = msr;
149+
#ifdef CONFIG_PPC_BOOK3S_64
150+
local_paca->hsrr_valid = 0;
151+
local_paca->srr_valid = 0;
152+
#endif
153+
}
154+
155+
static inline void set_return_regs_changed(void)
156+
{
157+
#ifdef CONFIG_PPC_BOOK3S_64
158+
local_paca->hsrr_valid = 0;
159+
local_paca->srr_valid = 0;
160+
#endif
161+
}
162+
163+
static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
164+
{
165+
regs_set_return_ip(regs, regs->nip + offset);
166+
}
126167

127168
static inline unsigned long instruction_pointer(struct pt_regs *regs)
128169
{
@@ -132,7 +173,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
132173
static inline void instruction_pointer_set(struct pt_regs *regs,
133174
unsigned long val)
134175
{
135-
regs->nip = val;
176+
regs_set_return_ip(regs, val);
136177
}
137178

138179
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
@@ -145,15 +186,6 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
145186
return 0;
146187
}
147188

148-
#ifdef CONFIG_SMP
149-
extern unsigned long profile_pc(struct pt_regs *regs);
150-
#else
151-
#define profile_pc(regs) instruction_pointer(regs)
152-
#endif
153-
154-
long do_syscall_trace_enter(struct pt_regs *regs);
155-
void do_syscall_trace_leave(struct pt_regs *regs);
156-
157189
#ifdef __powerpc64__
158190
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
159191
#else

arch/powerpc/kernel/asm-offsets.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,10 @@ int main(void)
190190
OFFSET(PACATOC, paca_struct, kernel_toc);
191191
OFFSET(PACAKBASE, paca_struct, kernelbase);
192192
OFFSET(PACAKMSR, paca_struct, kernel_msr);
193+
#ifdef CONFIG_PPC_BOOK3S_64
194+
OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
195+
OFFSET(PACASRR_VALID, paca_struct, srr_valid);
196+
#endif
193197
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
194198
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
195199
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);

arch/powerpc/kernel/entry_64.S

Lines changed: 85 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,30 @@ exception_marker:
6464
.section ".text"
6565
.align 7
6666

67+
.macro DEBUG_SRR_VALID srr
68+
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
69+
.ifc \srr,srr
70+
mfspr r11,SPRN_SRR0
71+
ld r12,_NIP(r1)
72+
100: tdne r11,r12
73+
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
74+
mfspr r11,SPRN_SRR1
75+
ld r12,_MSR(r1)
76+
100: tdne r11,r12
77+
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
78+
.else
79+
mfspr r11,SPRN_HSRR0
80+
ld r12,_NIP(r1)
81+
100: tdne r11,r12
82+
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
83+
mfspr r11,SPRN_HSRR1
84+
ld r12,_MSR(r1)
85+
100: tdne r11,r12
86+
EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
87+
.endif
88+
#endif
89+
.endm
90+
6791
#ifdef CONFIG_PPC_BOOK3S
6892
.macro system_call_vectored name trapnr
6993
.globl system_call_vectored_\name
@@ -286,6 +310,11 @@ END_BTB_FLUSH_SECTION
286310
ld r11,exception_marker@toc(r2)
287311
std r11,-16(r10) /* "regshere" marker */
288312

313+
#ifdef CONFIG_PPC_BOOK3S
314+
li r11,1
315+
stb r11,PACASRR_VALID(r13)
316+
#endif
317+
289318
/*
290319
* We always enter kernel from userspace with irq soft-mask enabled and
291320
* nothing pending. system_call_exception() will call
@@ -306,18 +335,27 @@ END_BTB_FLUSH_SECTION
306335
bl syscall_exit_prepare
307336

308337
ld r2,_CCR(r1)
338+
ld r6,_LINK(r1)
339+
mtlr r6
340+
341+
#ifdef CONFIG_PPC_BOOK3S
342+
lbz r4,PACASRR_VALID(r13)
343+
cmpdi r4,0
344+
bne 1f
345+
li r4,0
346+
stb r4,PACASRR_VALID(r13)
347+
#endif
309348
ld r4,_NIP(r1)
310349
ld r5,_MSR(r1)
311-
ld r6,_LINK(r1)
350+
mtspr SPRN_SRR0,r4
351+
mtspr SPRN_SRR1,r5
352+
1:
353+
DEBUG_SRR_VALID srr
312354

313355
BEGIN_FTR_SECTION
314356
stdcx. r0,0,r1 /* to clear the reservation */
315357
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
316358

317-
mtspr SPRN_SRR0,r4
318-
mtspr SPRN_SRR1,r5
319-
mtlr r6
320-
321359
cmpdi r3,0
322360
bne .Lsyscall_restore_regs
323361
/* Zero volatile regs that may contain sensitive kernel data */
@@ -673,19 +711,40 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
673711
kuap_user_restore r3, r4
674712
#endif
675713
.Lfast_user_interrupt_return_\srr\():
676-
ld r11,_NIP(r1)
677-
ld r12,_MSR(r1)
714+
678715
BEGIN_FTR_SECTION
679716
ld r10,_PPR(r1)
680717
mtspr SPRN_PPR,r10
681718
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
719+
720+
#ifdef CONFIG_PPC_BOOK3S
721+
.ifc \srr,srr
722+
lbz r4,PACASRR_VALID(r13)
723+
.else
724+
lbz r4,PACAHSRR_VALID(r13)
725+
.endif
726+
cmpdi r4,0
727+
li r4,0
728+
bne 1f
729+
#endif
730+
ld r11,_NIP(r1)
731+
ld r12,_MSR(r1)
682732
.ifc \srr,srr
683733
mtspr SPRN_SRR0,r11
684734
mtspr SPRN_SRR1,r12
735+
1:
736+
#ifdef CONFIG_PPC_BOOK3S
737+
stb r4,PACASRR_VALID(r13)
738+
#endif
685739
.else
686740
mtspr SPRN_HSRR0,r11
687741
mtspr SPRN_HSRR1,r12
742+
1:
743+
#ifdef CONFIG_PPC_BOOK3S
744+
stb r4,PACAHSRR_VALID(r13)
745+
#endif
688746
.endif
747+
DEBUG_SRR_VALID \srr
689748

690749
BEGIN_FTR_SECTION
691750
stdcx. r0,0,r1 /* to clear the reservation */
@@ -730,15 +789,34 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
730789

731790
.Lfast_kernel_interrupt_return_\srr\():
732791
cmpdi cr1,r3,0
792+
#ifdef CONFIG_PPC_BOOK3S
793+
.ifc \srr,srr
794+
lbz r4,PACASRR_VALID(r13)
795+
.else
796+
lbz r4,PACAHSRR_VALID(r13)
797+
.endif
798+
cmpdi r4,0
799+
li r4,0
800+
bne 1f
801+
#endif
733802
ld r11,_NIP(r1)
734803
ld r12,_MSR(r1)
735804
.ifc \srr,srr
736805
mtspr SPRN_SRR0,r11
737806
mtspr SPRN_SRR1,r12
807+
1:
808+
#ifdef CONFIG_PPC_BOOK3S
809+
stb r4,PACASRR_VALID(r13)
810+
#endif
738811
.else
739812
mtspr SPRN_HSRR0,r11
740813
mtspr SPRN_HSRR1,r12
814+
1:
815+
#ifdef CONFIG_PPC_BOOK3S
816+
stb r4,PACAHSRR_VALID(r13)
817+
#endif
741818
.endif
819+
DEBUG_SRR_VALID \srr
742820

743821
BEGIN_FTR_SECTION
744822
stdcx. r0,0,r1 /* to clear the reservation */

arch/powerpc/kernel/exceptions-64s.S

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -485,6 +485,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
485485
std r0,GPR0(r1) /* save r0 in stackframe */
486486
std r10,GPR1(r1) /* save r1 in stackframe */
487487

488+
/* Mark our [H]SRRs valid for return */
489+
li r10,1
490+
.if IHSRR_IF_HVMODE
491+
BEGIN_FTR_SECTION
492+
stb r10,PACAHSRR_VALID(r13)
493+
FTR_SECTION_ELSE
494+
stb r10,PACASRR_VALID(r13)
495+
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
496+
.elseif IHSRR
497+
stb r10,PACAHSRR_VALID(r13)
498+
.else
499+
stb r10,PACASRR_VALID(r13)
500+
.endif
501+
488502
.if ISET_RI
489503
li r10,MSR_RI
490504
mtmsrd r10,1 /* Set MSR_RI */
@@ -584,10 +598,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
584598
.macro EXCEPTION_RESTORE_REGS hsrr=0
585599
/* Move original SRR0 and SRR1 into the respective regs */
586600
ld r9,_MSR(r1)
601+
li r10,0
587602
.if \hsrr
588603
mtspr SPRN_HSRR1,r9
604+
stb r10,PACAHSRR_VALID(r13)
589605
.else
590606
mtspr SPRN_SRR1,r9
607+
stb r10,PACASRR_VALID(r13)
591608
.endif
592609
ld r9,_NIP(r1)
593610
.if \hsrr
@@ -1718,6 +1735,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
17181735
*
17191736
* Be careful to avoid touching the kernel stack.
17201737
*/
1738+
li r10,0
1739+
stb r10,PACAHSRR_VALID(r13)
17211740
ld r10,PACA_EXGEN+EX_CTR(r13)
17221741
mtctr r10
17231742
mtcrf 0x80,r9
@@ -2513,6 +2532,8 @@ BEGIN_FTR_SECTION
25132532
ld r10,PACA_EXGEN+EX_CFAR(r13)
25142533
mtspr SPRN_CFAR,r10
25152534
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2535+
li r10,0
2536+
stb r10,PACAHSRR_VALID(r13)
25162537
ld r10,PACA_EXGEN+EX_R10(r13)
25172538
ld r11,PACA_EXGEN+EX_R11(r13)
25182539
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2673,6 +2694,12 @@ masked_interrupt:
26732694
ori r11,r11,PACA_IRQ_HARD_DIS
26742695
stb r11,PACAIRQHAPPENED(r13)
26752696
2: /* done */
2697+
li r10,0
2698+
.if \hsrr
2699+
stb r10,PACAHSRR_VALID(r13)
2700+
.else
2701+
stb r10,PACASRR_VALID(r13)
2702+
.endif
26762703
ld r10,PACA_EXGEN+EX_CTR(r13)
26772704
mtctr r10
26782705
mtcrf 0x80,r9

0 commit comments

Comments (0)