@@ -206,7 +206,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 }
 
 struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
-				const struct cpumask *affinity)
+				struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_core_dev *dev = pool->dev;
 	char name[MLX5_MAX_IRQ_NAME];
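For context, struct irq_affinity_desc is the generic IRQ affinity descriptor from include/linux/interrupt.h; in the kernels this patch targets it pairs a cpumask with a managed-interrupt flag, which is why the later hunks touch both af_desc.mask and af_desc.is_managed. Its layout, reproduced here purely for reference:

	struct irq_affinity_desc {
		struct cpumask	mask;
		unsigned int	is_managed : 1;
	};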
@@ -235,8 +235,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	if (affinity) {
-		cpumask_copy(irq->mask, affinity);
+	if (af_desc) {
+		cpumask_copy(irq->mask, &af_desc->mask);
 		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
@@ -250,7 +250,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->map.virq, NULL);
+	if (af_desc)
+		irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
 	free_irq(irq->map.virq, &irq->nh);
@@ -299,7 +300,7 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
 /* requesting an irq from a given pool according to given index */
 static struct mlx5_irq *
 irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
-			struct cpumask *affinity)
+			struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq *irq;
 
@@ -309,7 +310,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 		mlx5_irq_get_locked(irq);
 		goto unlock;
 	}
-	irq = mlx5_irq_alloc(pool, vecidx, affinity);
+	irq = mlx5_irq_alloc(pool, vecidx, af_desc);
 unlock:
 	mutex_unlock(&pool->lock);
 	return irq;
@@ -386,28 +387,26 @@ void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
 struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
 {
 	struct mlx5_irq_pool *pool = ctrl_irq_pool_get(dev);
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
-	cpumask_copy(req_mask, cpu_online_mask);
+	cpumask_copy(&af_desc.mask, cpu_online_mask);
+	af_desc.is_managed = false;
 	if (!mlx5_irq_pool_is_sf_pool(pool)) {
 		/* In case we are allocating a control IRQ from a pci device's pool.
 		 * This can happen also for a SF if the SFs pool is empty.
 		 */
 		if (!pool->xa_num_irqs.max) {
-			cpumask_clear(req_mask);
+			cpumask_clear(&af_desc.mask);
 			/* In case we only have a single IRQ for PF/VF */
-			cpumask_set_cpu(cpumask_first(cpu_online_mask), req_mask);
+			cpumask_set_cpu(cpumask_first(cpu_online_mask), &af_desc.mask);
 		}
 		/* Allocate the IRQ in the last index of the pool */
-		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, req_mask);
+		irq = irq_pool_request_vector(pool, pool->xa_num_irqs.max, &af_desc);
 	} else {
-		irq = mlx5_irq_affinity_request(pool, req_mask);
+		irq = mlx5_irq_affinity_request(pool, &af_desc);
 	}
 
-	free_cpumask_var(req_mask);
 	return irq;
 }
 
@@ -416,23 +415,23 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
  * @dev: mlx5 device that requesting the IRQ.
  * @vecidx: vector index of the IRQ. This argument is ignore if affinity is
  *	    provided.
- * @affinity: cpumask requested for this IRQ.
+ * @af_desc: affinity descriptor for this IRQ.
  *
  * This function returns a pointer to IRQ, or ERR_PTR in case of error.
  */
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
-				  struct cpumask *affinity)
+				  struct irq_affinity_desc *af_desc)
 {
 	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
 	struct mlx5_irq_pool *pool;
 	struct mlx5_irq *irq;
 
 	pool = irq_table->pf_pool;
-	irq = irq_pool_request_vector(pool, vecidx, affinity);
+	irq = irq_pool_request_vector(pool, vecidx, af_desc);
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->map.virq, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(&af_desc->mask),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
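A minimal caller-side sketch of the updated mlx5_irq_request() interface, assuming the driver's internal headers are in scope; the wrapper name and the single-CPU choice are illustrative only and not part of this patch:

	/* Illustrative only: pin vector "vecidx" of "dev" to one CPU by
	 * building the affinity descriptor on the stack, as the converted
	 * callers above now do.
	 */
	static struct mlx5_irq *example_request_on_cpu(struct mlx5_core_dev *dev,
						       u16 vecidx, int cpu)
	{
		struct irq_affinity_desc af_desc = {};

		cpumask_set_cpu(cpu, &af_desc.mask);
		af_desc.is_managed = false;	/* plain affinity hint, not a managed IRQ */
		return mlx5_irq_request(dev, vecidx, &af_desc);
	}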
@@ -463,22 +462,20 @@ void mlx5_irqs_release_vectors(struct mlx5_irq **irqs, int nirqs)
 int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 			      struct mlx5_irq **irqs)
 {
-	cpumask_var_t req_mask;
+	struct irq_affinity_desc af_desc;
 	struct mlx5_irq *irq;
 	int i;
 
-	if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL))
-		return -ENOMEM;
+	af_desc.is_managed = 1;
 	for (i = 0; i < nirqs; i++) {
-		cpumask_set_cpu(cpus[i], req_mask);
-		irq = mlx5_irq_request(dev, i, req_mask);
+		cpumask_set_cpu(cpus[i], &af_desc.mask);
+		irq = mlx5_irq_request(dev, i, &af_desc);
 		if (IS_ERR(irq))
 			break;
-		cpumask_clear(req_mask);
+		cpumask_clear(&af_desc.mask);
 		irqs[i] = irq;
 	}
 
-	free_cpumask_var(req_mask);
 	return i ? i : PTR_ERR(irq);
 }
 
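A hedged sketch of how mlx5_irqs_request_vectors() might be driven, again assuming the driver's internal headers; the helper name and the kcalloc-based bookkeeping are illustrative, only the two mlx5_irqs_* calls come from the code above:

	/* Illustrative only: request up to nirqs vectors, one per CPU listed
	 * in cpus[], then release whatever was granted.
	 */
	static int example_request_vectors(struct mlx5_core_dev *dev, u16 *cpus,
					   int nirqs)
	{
		struct mlx5_irq **irqs;
		int granted;

		irqs = kcalloc(nirqs, sizeof(*irqs), GFP_KERNEL);
		if (!irqs)
			return -ENOMEM;
		granted = mlx5_irqs_request_vectors(dev, cpus, nirqs, irqs);
		if (granted > 0)
			mlx5_irqs_release_vectors(irqs, granted);
		kfree(irqs);
		return granted < 0 ? granted : 0;
	}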