
Commit 9c15eeb

jgowans authored and Marc Zyngier committed
genirq: Allow fasteoi handler to resend interrupts on concurrent handling
There is a class of interrupt controllers out there that, once they have signalled a given interrupt number, will still signal incoming instances of the *same* interrupt despite the original interrupt not having been EOIed yet. As long as the new interrupt reaches the *same* CPU, nothing bad happens, as that CPU still has its interrupts globally disabled, and we will only take the new interrupt once the previous one has been EOIed.

However, things become more "interesting" if an affinity change comes in while the interrupt is being handled - more specifically, while the per-irq lock is being dropped. This results in the affinity change taking place immediately. At this point, there is nothing that prevents the interrupt from firing on the new target CPU. We end up with the interrupt running concurrently on two CPUs, which isn't a good thing.

And that's where things become worse: the new CPU notices that the interrupt handling is in progress (irq_may_run() returns false), and *drops the interrupt on the floor*.

The whole race looks like this:

            CPU 0            |          CPU 1
-----------------------------|-----------------------------
interrupt start              |
  handle_fasteoi_irq         | set_affinity(CPU 1)
    handler                  |
    ...                      | interrupt start
    ...                      |   handle_fasteoi_irq -> early out
  handle_fasteoi_irq return  | interrupt end
interrupt end                |

If the interrupt was an edge, too bad. The interrupt is lost, and the system will eventually die one way or another. Not great.

A way to avoid this situation is to detect this problem at the point we handle the interrupt on the new target. Instead of dropping the interrupt, use the resend mechanism to force it to be replayed.

Also, in order to limit the impact of this workaround to the pathetic architectures that require it, gate it behind a new irq flag aptly named IRQD_RESEND_WHEN_IN_PROGRESS.

Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: James Gowans <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: KarimAllah Raslan <[email protected]>
Cc: Yipeng Zou <[email protected]>
Cc: Zhang Jianhua <[email protected]>
[maz: reworded commit message]
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
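Where does the window come from? The fasteoi flow drops the per-irq lock around the invocation of the handler itself, and that is exactly when an affinity change can take effect. A simplified sketch, closely modelled on handle_irq_event() in kernel/irq/handle.c (stats and wakeup handling elided):

	/*
	 * Simplified sketch of handle_irq_event(): desc->lock is released
	 * while the action handlers run, opening the window in which
	 * irq_set_affinity() can move the interrupt to another CPU.
	 */
	irqreturn_t handle_irq_event(struct irq_desc *desc)
	{
		irqreturn_t ret;

		desc->istate &= ~IRQS_PENDING;
		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
		raw_spin_unlock(&desc->lock);		/* affinity change can land here */

		ret = handle_irq_event_percpu(desc);	/* run the handlers */

		raw_spin_lock(&desc->lock);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
		return ret;
	}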
1 parent: 7cc148a

File tree

3 files changed: +30 -1 lines changed


include/linux/irq.h

Lines changed: 13 additions & 0 deletions
@@ -223,6 +223,8 @@ struct irq_data {
  *				  irq_chip::irq_set_affinity() when deactivated.
  * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
  *				  irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS	- Interrupt may fire when already in progress in which
+ *				  case it must be resent at the next available opportunity.
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -249,6 +251,7 @@ enum {
 	IRQD_HANDLE_ENFORCE_IRQCTX	= BIT(28),
 	IRQD_AFFINITY_ON_ACTIVATE	= BIT(29),
 	IRQD_IRQ_ENABLED_ON_SUSPEND	= BIT(30),
+	IRQD_RESEND_WHEN_IN_PROGRESS	= BIT(31),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -448,6 +451,16 @@ static inline bool irqd_affinity_on_activate(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
 }
 
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
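A driver opts in per interrupt, typically when the interrupt is mapped or allocated. The sketch below is illustrative rather than part of this commit - the domain callback and its setup details are hypothetical - but it mirrors how an affected irqchip would use the new helper (the GICv3 ITS enables the flag for its LPIs in much the same way in a follow-up patch):

	/* Hypothetical irq_domain alloc callback for an affected irqchip */
	static int my_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				       unsigned int nr_irqs, void *args)
	{
		int i;

		for (i = 0; i < nr_irqs; i++) {
			/* ... usual hwirq lookup and irq_domain_set_info() ... */

			/* Ask the core to resend rather than drop a racing IRQ */
			irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i));
		}
		return 0;
	}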

kernel/irq/chip.c

Lines changed: 15 additions & 1 deletion
@@ -692,8 +692,16 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	/*
+	 * When an affinity change races with IRQ handling, the next interrupt
+	 * can arrive on the new CPU before the original CPU has completed
+	 * handling the previous one - it may need to be resent.
+	 */
+	if (!irq_may_run(desc)) {
+		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
+			desc->istate |= IRQS_PENDING;
 		goto out;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -715,6 +723,12 @@ void handle_fasteoi_irq(struct irq_desc *desc)
 
 	cond_unmask_eoi_irq(desc, chip);
 
+	/*
+	 * When the race described above happens this will resend the interrupt.
+	 */
+	if (unlikely(desc->istate & IRQS_PENDING))
+		check_irq_resend(desc, false);
+
 	raw_spin_unlock(&desc->lock);
 	return;
 out:
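When the early-out path has latched IRQS_PENDING, the check_irq_resend() call after the EOI replays the interrupt. Roughly how the resend machinery behaves, as a simplified sketch of kernel/irq/resend.c (the injection path and some error handling are elided):

	int check_irq_resend(struct irq_desc *desc, bool inject)
	{
		int err = 0;

		/* Level interrupts resend themselves in hardware while active */
		if (irq_settings_is_level(desc)) {
			desc->istate &= ~IRQS_PENDING;
			return -EINVAL;
		}
		if (desc->istate & IRQS_REPLAY)
			return -EBUSY;			/* replay already in flight */
		if (!(desc->istate & IRQS_PENDING) && !inject)
			return 0;

		desc->istate &= ~IRQS_PENDING;
		if (!try_retrigger(desc))		/* chip->irq_retrigger(), if any */
			err = irq_sw_resend(desc);	/* software replay via tasklet */
		if (!err)
			desc->istate |= IRQS_REPLAY;
		return err;
	}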

kernel/irq/debugfs.c

Lines changed: 2 additions & 0 deletions
@@ -133,6 +133,8 @@ static const struct irq_bit_descr irqdata_states[] = {
 	BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
 
 	BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
+
+	BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
 };
 
 static const struct irq_bit_descr irqdesc_states[] = {
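With CONFIG_GENERIC_IRQ_DEBUGFS enabled, the new bit becomes visible in the per-interrupt state dump, so an opted-in interrupt can be verified at runtime; for example (interrupt number illustrative):

	# cat /sys/kernel/debug/irq/irqs/<n>

will list IRQD_RESEND_WHEN_IN_PROGRESS among the irqdata_states bits when the flag is set on that interrupt.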
