@@ -29,8 +29,7 @@ struct mlx5_irq {
 	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_irq_pool *pool;
 	int refcount;
-	u32 index;
-	int irqn;
+	struct msi_map map;
 };
 
 struct mlx5_irq_table {
@@ -128,14 +127,14 @@ static void irq_release(struct mlx5_irq *irq)
 {
 	struct mlx5_irq_pool *pool = irq->pool;
 
-	xa_erase(&pool->irqs, irq->index);
+	xa_erase(&pool->irqs, irq->map.index);
 	/* free_irq requires that affinity_hint and rmap will be cleared
 	 * before calling it. This is why there is asymmetry with set_rmap
 	 * which should be called after alloc_irq but before request_irq.
 	 */
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 	kfree(irq);
 }
 
@@ -217,15 +216,15 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
-	irq->irqn = pci_irq_vector(dev->pdev, i);
+	irq->map.virq = pci_irq_vector(dev->pdev, i);
 	if (!mlx5_irq_pool_is_sf_pool(pool))
 		irq_set_name(pool, name, i);
 	else
 		irq_sf_set_name(pool, name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
 	snprintf(irq->name, MLX5_MAX_IRQ_NAME,
 		 "%s@pci:%s", name, pci_name(dev->pdev));
-	err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+	err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
 			  &irq->nh);
 	if (err) {
 		mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
@@ -238,23 +237,23 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	if (affinity) {
 		cpumask_copy(irq->mask, affinity);
-		irq_set_affinity_and_hint(irq->irqn, irq->mask);
+		irq_set_affinity_and_hint(irq->map.virq, irq->mask);
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
-	irq->index = i;
-	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
+	irq->map.index = i;
+	err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
 	if (err) {
 		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-			      irq->index, err);
+			      irq->map.index, err);
 		goto err_xa;
 	}
 	return irq;
 err_xa:
-	irq_update_affinity_hint(irq->irqn, NULL);
+	irq_update_affinity_hint(irq->map.virq, NULL);
 	free_cpumask_var(irq->mask);
 err_cpumask:
-	free_irq(irq->irqn, &irq->nh);
+	free_irq(irq->map.virq, &irq->nh);
 err_req_irq:
 	kfree(irq);
 	return ERR_PTR(err);
@@ -292,7 +291,7 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
 
 int mlx5_irq_get_index(struct mlx5_irq *irq)
 {
-	return irq->index;
+	return irq->map.index;
 }
 
 /* irq_pool API */
@@ -364,7 +363,7 @@ static void mlx5_irqs_release(struct mlx5_irq **irqs, int nirqs)
 	int i;
 
 	for (i = 0; i < nirqs; i++) {
-		synchronize_irq(irqs[i]->irqn);
+		synchronize_irq(irqs[i]->map.virq);
 		mlx5_irq_put(irqs[i]);
 	}
 }
@@ -433,7 +432,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	if (IS_ERR(irq))
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
-		      irq->irqn, cpumask_pr_args(affinity),
+		      irq->map.virq, cpumask_pr_args(affinity),
 		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
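
Net effect of the hunks above: the driver-private index/irqn pair in struct mlx5_irq is folded into the generic struct msi_map, so a single member carries both the vector index used as the xarray key and the Linux interrupt number handed to request_irq()/free_irq(). The sketch below is only a reminder of the pairing the patch relies on, assuming the msi_map definition that lives in include/linux/msi_api.h; the mapping of old fields to new members is taken directly from the diff.

/*
 * Sketch of struct msi_map as used by this patch (authoritative
 * definition: include/linux/msi_api.h).  Old fields map one-to-one:
 *
 *   irq->index  ->  irq->map.index   (vector index, xarray key in the pool)
 *   irq->irqn   ->  irq->map.virq    (Linux interrupt number)
 */
struct msi_map {
	int index;	/* MSI vector/descriptor index */
	int virq;	/* Linux interrupt number for the mapped vector */
};

Keeping both values in one msi_map presumably lets later changes store the result of an MSI-X vector allocation directly, instead of translating it back into two separate driver fields.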