
Commit bbac70c

elic307i (Eli Cohen) authored and Saeed Mahameed committed
net/mlx5: Use newer affinity descriptor

Use the more refined struct irq_affinity_desc to describe the required
IRQ affinity. For the async IRQs, request unmanaged affinity; for
completion queues, use managed affinity. No functional changes
introduced. The descriptor will be used in a subsequent patch when we
move to dynamic MSIX allocation.

Signed-off-by: Eli Cohen <[email protected]>
Reviewed-by: Shay Drory <[email protected]>
Signed-off-by: Saeed Mahameed <[email protected]>
Reviewed-by: Jacob Keller <[email protected]>
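For context, the descriptor this patch adopts bundles the requested CPU mask together with a flag marking the affinity as kernel-managed; its definition in include/linux/interrupt.h is roughly:

struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

Carrying is_managed alongside the mask is what lets the mlx5 code below request unmanaged affinity for async/control IRQs and managed affinity for completion vectors through a single parameter.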
1 parent 235a25f commit bbac70c

File tree

4 files changed: +44, -52 lines

drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c

Lines changed: 17 additions & 22 deletions
@@ -45,30 +45,27 @@ static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
 
 /* Creating an IRQ from irq_pool */
 static struct mlx5_irq *
-irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
-	cpumask_var_t auto_mask;
-	struct mlx5_irq *irq;
+	struct irq_affinity_desc auto_desc = {};
 	u32 irq_index;
 	int err;
 
-	if (!zalloc_cpumask_var(&auto_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
 	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
 	if (err)
 		return ERR_PTR(err);
 	if (pool->irqs_per_cpu) {
-		if (cpumask_weight(req_mask) > 1)
+		if (cpumask_weight(&af_desc->mask) > 1)
 			/* if req_mask contain more then one CPU, set the least loadad CPU
 			 * of req_mask
 			 */
-			cpumask_set_cpu(cpu_get_least_loaded(pool, req_mask), auto_mask);
+			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
+					&auto_desc.mask);
 		else
-			cpu_get(pool, cpumask_first(req_mask));
+			cpu_get(pool, cpumask_first(&af_desc->mask));
 	}
-	irq = mlx5_irq_alloc(pool, irq_index, cpumask_empty(auto_mask) ? req_mask : auto_mask);
-	free_cpumask_var(auto_mask);
-	return irq;
+	return mlx5_irq_alloc(pool, irq_index,
+			      cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc);
 }
 
 /* Looking for the IRQ with the smallest refcount that fits req_mask.
@@ -115,22 +112,22 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
 /**
  * mlx5_irq_affinity_request - request an IRQ according to the given mask.
  * @pool: IRQ pool to request from.
- * @req_mask: cpumask requested for this IRQ.
+ * @af_desc: affinity descriptor for this IRQ.
  *
  * This function returns a pointer to IRQ, or ERR_PTR in case of error.
  */
 struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *least_loaded_irq, *new_irq;
 
 	mutex_lock(&pool->lock);
-	least_loaded_irq = irq_pool_find_least_loaded(pool, req_mask);
+	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
 	if (least_loaded_irq &&
 	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
 		goto out;
 	/* We didn't find an IRQ with less than min_thres, try to allocate a new IRQ */
-	new_irq = irq_pool_request_irq(pool, req_mask);
+	new_irq = irq_pool_request_irq(pool, af_desc);
 	if (IS_ERR(new_irq)) {
 		if (!least_loaded_irq) {
 			/* We failed to create an IRQ and we didn't find an IRQ */
@@ -194,16 +191,15 @@ int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 					struct mlx5_irq **irqs)
 {
 	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc = {};
 	struct mlx5_irq *irq;
 	int i = 0;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return -ENOMEM;
-	cpumask_copy(req_mask, cpu_online_mask);
+	af_desc.is_managed = 1;
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
 	for (i = 0; i < nirqs; i++) {
 		if (mlx5_irq_pool_is_sf_pool(pool))
-			irq = mlx5_irq_affinity_request(pool, req_mask);
+			irq = mlx5_irq_affinity_request(pool, &af_desc);
 		else
 			/* In case SF pool doesn't exists, fallback to the PF IRQs.
 			 * The PF IRQs are already allocated and binded to CPU
@@ -213,13 +209,12 @@ int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 		if (IS_ERR(irq))
 			break;
 		irqs[i] = irq;
-		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), req_mask);
+		cpumask_clear_cpu(cpumask_first(mlx5_irq_get_affinity_mask(irq)), &af_desc.mask);
 		mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
 			      pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
 			      cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
 			      mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
 	}
-	free_cpumask_var(req_mask);
 	if (!i)
 		return PTR_ERR(irq);
 	return i;
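A side effect of embedding the cpumask inside the descriptor is that the dynamically allocated cpumask_var_t (heap-backed when CONFIG_CPUMASK_OFFSTACK=y), its -ENOMEM error path, and the matching free all disappear; a minimal sketch of the pattern change (illustrative only, not a literal excerpt):

/* Before: mask with an allocation that can fail and must be freed. */
cpumask_var_t mask;

if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
	return ERR_PTR(-ENOMEM);
/* ... use mask ... */
free_cpumask_var(mask);

/* After: zero-initialized descriptor on the stack, nothing to free. */
struct irq_affinity_desc desc = {};
/* ... use &desc.mask ... */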

drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h

Lines changed: 3 additions & 3 deletions
@@ -25,7 +25,7 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
 void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
-				  struct cpumask *affinity);
+				  struct irq_affinity_desc *af_desc);
 int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 			      struct mlx5_irq **irqs);
 void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs);
@@ -39,7 +39,7 @@ struct mlx5_irq_pool;
 int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev, int nirqs,
 					struct mlx5_irq **irqs);
 struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
-					   const struct cpumask *req_mask);
+					   struct irq_affinity_desc *af_desc);
 void mlx5_irq_affinity_irqs_release(struct mlx5_core_dev *dev, struct mlx5_irq **irqs,
 				    int num_irqs);
 #else
@@ -50,7 +50,7 @@ static inline int mlx5_irq_affinity_irqs_request_auto(struct mlx5_core_dev *dev,
 }
 
 static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
+mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
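With the updated prototypes, a caller prepares the descriptor before requesting the vector; a minimal sketch against the declarations above (the CPU number, vecidx, and error handling are illustrative):

struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;

cpumask_set_cpu(2, &af_desc.mask);	/* request placement on CPU 2 */
af_desc.is_managed = 1;			/* completion-queue style affinity */
irq = mlx5_irq_request(dev, vecidx, &af_desc);
if (IS_ERR(irq))
	return PTR_ERR(irq);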

drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c

Lines changed: 23 additions & 26 deletions
@@ -206,7 +206,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 }
 
 struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
-				const struct cpumask *affinity)
+				struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_core_dev *dev = pool->dev;
 	char name[MLX5_MAX_IRQ_NAME];
@@ -235,8 +235,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	if (affinity) {
-		cpumask_copy(irq->mask, affinity);
+	if (af_desc) {
+		cpumask_copy(irq->mask, &af_desc->mask);
 		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
@@ -250,7 +250,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->map.virq, NULL);
+	if (af_desc)
+		irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
 	free_irq(irq->map.virq, &irq->nh);
@@ -299,7 +300,7 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
 /* requesting an irq from a given pool according to given index */
 static struct mlx5_irq *
 irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
-			struct cpumask *affinity)
+			struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *irq;
 
@@ -309,7 +310,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 		mlx5_irq_get_locked(irq);
 		goto unlock;
 	}
-	irq = mlx5_irq_alloc(pool, vecidx, affinity);
+	irq = mlx5_irq_alloc(pool, vecidx, af_desc);
 unlock:
 	mutex_unlock(&pool->lock);
 	return irq;
@@ -386,28 +387,26 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 {
 	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
-	cpumask_copy(req_mask, cpu_online_mask);
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
+	af_desc.is_managed = false;
 	if (!mlx5_irq_pool_is_sf_pool(pool)) {
 		/* In case we are allocating a control IRQ from a pci device's pool.
 		 * This can happen also for a SF if the SFs pool is empty.
 		 */
 		if (!pool->xa_num_irqs.max) {
-			cpumask_clear(req_mask);
+			cpumask_clear(&af_desc.mask);
 			/* In case we only have a single IRQ for PF/VF */
-			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
 		}
 		/* Allocate the IRQ in the last index of the pool */
-		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, &af_desc);
 	} else {
-		irq = mlx5_irq_affinity_request(pool, req_mask);
+		irq = mlx5_irq_affinity_request(pool, &af_desc);
 	}
 
-	free_cpumask_var(req_mask);
 	return irq;
 }
 
@@ -416,23 +415,23 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
  * @dev: mlx5 device that requesting the IRQ.
  * @vecidx: vector index of the IRQ. This argument is ignore if affinity is
  * provided.
- * @affinity: cpumask requested for this IRQ.
+ * @af_desc: affinity descriptor for this IRQ.
 *
 * This function returns a pointer to IRQ, or ERR_PTR in case of error.
 */
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
-				  struct cpumask *affinity)
+				  struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
 	struct mlx5_irq_pool *pool;
 	struct mlx5_irq *irq;
 
 	pool = irq_table->pf_pool;
-	irq = irq_pool_request_vector(pool, vecidx, affinity);
+	irq = irq_pool_request_vector(pool, vecidx, af_desc);
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->map.virq, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(&af_desc->mask),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
@@ -463,22 +462,20 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
 int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 			      struct mlx5_irq **irqs)
 {
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 	int i;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return -ENOMEM;
+	af_desc.is_managed = 1;
 	for (i = 0; i < nirqs; i++) {
-		cpumask_set_cpu(cpus[i], req_mask);
-		irq = mlx5_irq_request(dev, i, req_mask);
+		cpumask_set_cpu(cpus[i], &af_desc.mask);
+		irq = mlx5_irq_request(dev, i, &af_desc);
 		if (IS_ERR(irq))
 			break;
-		cpumask_clear(req_mask);
+		cpumask_clear(&af_desc.mask);
 		irqs[i] = irq;
 	}
 
-	free_cpumask_var(req_mask);
 	return i ? i : PTR_ERR(irq);
 }
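The split described in the commit message is visible in the two call sites above: mlx5_ctrl_irq_request() clears is_managed for the async/control IRQ, while mlx5_irqs_request_vectors() sets it for completion vectors. In kernel terms, a managed descriptor tells the IRQ core that placement is owned by the kernel/driver and is not subject to user-space rebalancing, whereas an unmanaged IRQ's affinity remains an ordinary hint:

/* Condensed from the diff above, names as in the mlx5 code. */
struct irq_affinity_desc ctrl_desc = { .is_managed = 0 };	/* async/control IRQ */
struct irq_affinity_desc comp_desc = { .is_managed = 1 };	/* completion vectors */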

drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
 }
 
 struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
-				const struct cpumask *affinity);
+				struct irq_affinity_desc *af_desc);
 int mlx5_irq_get_locked(struct mlx5_irq *irq);
 int mlx5_irq_read_locked(struct mlx5_irq *irq);
 int mlx5_irq_put(struct mlx5_irq *irq);

0 commit comments