
Commit c285480

npiggin authored and mpe committed
powerpc/64: Fix perf profiling asynchronous interrupt handlers
Interrupt entry sets the soft mask to IRQS_ALL_DISABLED to match the hard irq disabled state. So when should_hard_irq_enable() returns true because we want PMI interrupts in irq handlers, MSR[EE] is enabled but PMIs just get soft-masked. Fix this by clearing IRQS_PMI_DISABLED before enabling MSR[EE].

This also tidies some of the warnings; there is no need to duplicate them in both should_hard_irq_enable() and do_hard_irq_enable().

Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
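As background for the diff below, here is a minimal standalone sketch of the soft-mask arithmetic the fix relies on. It is not kernel code: the IRQS_* values mirror the definitions in arch/powerpc/include/asm/hw_irq.h at the time of this commit, while main() and the assert are purely illustrative.

#include <assert.h>

/* Soft-mask bits, mirroring arch/powerpc/include/asm/hw_irq.h. */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1	/* ordinary (local_irq_disable()) interrupts */
#define IRQS_PMI_DISABLED	2	/* performance monitor interrupts */
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

int main(void)
{
	/* Asynchronous interrupt entry leaves the soft mask fully disabled. */
	unsigned long mask = IRQS_ALL_DISABLED;

	/*
	 * Pre-fix behaviour: MSR[EE] was turned back on while the mask stayed
	 * at IRQS_ALL_DISABLED, so an arriving PMI was soft-masked and
	 * replayed later instead of sampling the handler. The fix clears only
	 * the PMI bit first, which is what the new
	 * irq_soft_mask_andc_return(IRQS_PMI_DISABLED) call does:
	 */
	mask &= ~IRQS_PMI_DISABLED;

	/* Ordinary interrupts remain soft-masked; PMIs are now taken. */
	assert(mask == IRQS_DISABLED);
	return 0;
}

The key point: clearing only IRQS_PMI_DISABLED lets PMIs through to profile the handler while other soft-masked interrupts still cannot run early.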
1 parent bc88ef6 commit c285480


4 files changed: +32, -15 lines changed


arch/powerpc/include/asm/hw_irq.h

Lines changed: 29 additions & 12 deletions
@@ -173,6 +173,15 @@ static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 	return flags;
 }
 
+static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
+{
+	unsigned long flags = irq_soft_mask_return();
+
+	irq_soft_mask_set(flags & ~mask);
+
+	return flags;
+}
+
 static inline unsigned long arch_local_save_flags(void)
 {
 	return irq_soft_mask_return();
@@ -331,10 +340,11 @@ bool power_pmu_wants_prompt_pmi(void);
  * is a different soft-masked interrupt pending that requires hard
  * masking.
  */
-static inline bool should_hard_irq_enable(void)
+static inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
 		WARN_ON(mfmsr() & MSR_EE);
 	}
 
@@ -347,8 +357,17 @@ static inline bool should_hard_irq_enable(void)
 	 *
 	 * TODO: Add test for 64e
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
-		return false;
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+		if (!power_pmu_wants_prompt_pmi())
+			return false;
+		/*
+		 * If PMIs are disabled then IRQs should be disabled as well,
+		 * so we shouldn't see this condition, check for it just in
+		 * case because we are about to enable PMIs.
+		 */
+		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
+			return false;
+	}
 
 	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 		return false;
@@ -358,18 +377,16 @@ static inline bool should_hard_irq_enable(void)
 
 /*
  * Do the hard enabling, only call this if should_hard_irq_enable is true.
+ * This allows PMI interrupts to profile irq handlers.
  */
 static inline void do_hard_irq_enable(void)
 {
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
-		WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
-		WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
-		WARN_ON(mfmsr() & MSR_EE);
-	}
 	/*
-	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
-	 * There is no other reason to enable this way.
+	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
+	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
 	 */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
 	__hard_irq_enable();
 }
@@ -452,7 +469,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static __always_inline bool should_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
 {
 	return false;
 }
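For orientation before the caller updates: the three asynchronous handlers below all adopt the same pattern, condensed in this sketch. The handler name and trailing comment are illustrative, not from the patch; DEFINE_INTERRUPT_HANDLER_ASYNC is the real macro, and it is what supplies the regs argument the check now inspects.

/* Illustrative only: the common pattern the following callers adopt. */
DEFINE_INTERRUPT_HANDLER_ASYNC(example_async_handler)	/* hypothetical name */
{
	/*
	 * regs is now passed through so should_hard_irq_enable() can check
	 * regs->softe for IRQS_PMI_DISABLED before PMIs are unmasked.
	 */
	if (should_hard_irq_enable(regs))
		do_hard_irq_enable();

	/* ... interrupt-specific work, now visible to perf PMI sampling ... */
}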

arch/powerpc/kernel/dbell.c

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
 
 	ppc_msgsync();
 
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	kvmppc_clear_host_ipi(smp_processor_id());

arch/powerpc/kernel/irq.c

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
 	irq = static_call(ppc_get_irq)();
 
 	/* We can hard enable interrupts now to allow perf interrupts */
-	if (should_hard_irq_enable())
+	if (should_hard_irq_enable(regs))
 		do_hard_irq_enable();
 
 	/* And finally process it */

arch/powerpc/kernel/time.c

Lines changed: 1 addition & 1 deletion
@@ -515,7 +515,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
 	}
 
 	/* Conditionally hard-enable interrupts. */
-	if (should_hard_irq_enable()) {
+	if (should_hard_irq_enable(regs)) {
 		/*
 		 * Ensure a positive value is written to the decrementer, or
 		 * else some CPUs will continue to take decrementer exceptions.
