Skip to content

Commit 45ddcec

Browse files
committed
genirq: Use affinity hint in irqdesc allocation
Use the affinity hint in the irqdesc allocator. The hint is used to determine
the node for the allocation and to set the initial affinity of the interrupt.

If multiple interrupts are allocated (multi-MSI) then the allocator iterates
over the cpumask and for each set cpu it allocates on their node and sets the
initial affinity to that cpu.

If a single interrupt is allocated (MSI-X) then the allocator uses the first
cpu in the mask to compute the allocation node and uses the mask for the
initial affinity setting.

Interrupts set up this way are marked with the AFFINITY_MANAGED flag to
prevent userspace from messing with their affinity settings.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent 06ee6d5 commit 45ddcec

File tree

1 file changed

+39
-12
lines changed

1 file changed

+39
-12
lines changed

kernel/irq/irqdesc.c

Lines changed: 39 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -68,9 +68,13 @@ static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
6868
return 0;
6969
}
7070

71-
static void desc_smp_init(struct irq_desc *desc, int node)
71+
static void desc_smp_init(struct irq_desc *desc, int node,
72+
const struct cpumask *affinity)
7273
{
73-
cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
74+
if (!affinity)
75+
affinity = irq_default_affinity;
76+
cpumask_copy(desc->irq_common_data.affinity, affinity);
77+
7478
#ifdef CONFIG_GENERIC_PENDING_IRQ
7579
cpumask_clear(desc->pending_mask);
7680
#endif
@@ -82,11 +86,12 @@ static void desc_smp_init(struct irq_desc *desc, int node)
8286
#else
8387
static inline int
8488
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
85-
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
89+
static inline void
90+
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
8691
#endif
8792

8893
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
89-
struct module *owner)
94+
const struct cpumask *affinity, struct module *owner)
9095
{
9196
int cpu;
9297

@@ -107,7 +112,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
107112
desc->owner = owner;
108113
for_each_possible_cpu(cpu)
109114
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
110-
desc_smp_init(desc, node);
115+
desc_smp_init(desc, node, affinity);
111116
}
112117

113118
int nr_irqs = NR_IRQS;
@@ -158,7 +163,9 @@ void irq_unlock_sparse(void)
158163
mutex_unlock(&sparse_irq_lock);
159164
}
160165

161-
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
166+
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
167+
const struct cpumask *affinity,
168+
struct module *owner)
162169
{
163170
struct irq_desc *desc;
164171
gfp_t gfp = GFP_KERNEL;
@@ -178,7 +185,8 @@ static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
178185
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
179186
init_rcu_head(&desc->rcu);
180187

181-
desc_set_defaults(irq, desc, node, owner);
188+
desc_set_defaults(irq, desc, node, affinity, owner);
189+
irqd_set(&desc->irq_data, flags);
182190

183191
return desc;
184192

@@ -225,11 +233,30 @@ static void free_desc(unsigned int irq)
225233
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
226234
const struct cpumask *affinity, struct module *owner)
227235
{
236+
const struct cpumask *mask = NULL;
228237
struct irq_desc *desc;
229-
int i;
238+
unsigned int flags;
239+
int i, cpu = -1;
240+
241+
if (affinity && cpumask_empty(affinity))
242+
return -EINVAL;
243+
244+
flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
230245

231246
for (i = 0; i < cnt; i++) {
232-
desc = alloc_desc(start + i, node, owner);
247+
if (affinity) {
248+
cpu = cpumask_next(cpu, affinity);
249+
if (cpu >= nr_cpu_ids)
250+
cpu = cpumask_first(affinity);
251+
node = cpu_to_node(cpu);
252+
253+
/*
254+
* For single allocations we use the caller provided
255+
* mask otherwise we use the mask of the target cpu
256+
*/
257+
mask = cnt == 1 ? affinity : cpumask_of(cpu);
258+
}
259+
desc = alloc_desc(start + i, node, flags, mask, owner);
233260
if (!desc)
234261
goto err;
235262
mutex_lock(&sparse_irq_lock);
@@ -277,7 +304,7 @@ int __init early_irq_init(void)
277304
nr_irqs = initcnt;
278305

279306
for (i = 0; i < initcnt; i++) {
280-
desc = alloc_desc(i, node, NULL);
307+
desc = alloc_desc(i, node, 0, NULL, NULL);
281308
set_bit(i, allocated_irqs);
282309
irq_insert_desc(i, desc);
283310
}
@@ -311,7 +338,7 @@ int __init early_irq_init(void)
311338
alloc_masks(&desc[i], GFP_KERNEL, node);
312339
raw_spin_lock_init(&desc[i].lock);
313340
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
314-
desc_set_defaults(i, &desc[i], node, NULL);
341+
desc_set_defaults(i, &desc[i], node, NULL, NULL);
315342
}
316343
return arch_early_irq_init();
317344
}
@@ -328,7 +355,7 @@ static void free_desc(unsigned int irq)
328355
unsigned long flags;
329356

330357
raw_spin_lock_irqsave(&desc->lock, flags);
331-
desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
358+
desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
332359
raw_spin_unlock_irqrestore(&desc->lock, flags);
333360
}
334361

0 commit comments

Comments (0)