@@ -169,8 +169,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			goto next;
 
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] >
-			    VECTOR_UNDEFINED)
+			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
@@ -182,7 +181,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 			   cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
+			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
 		d->cfg.vector = vector;
 		cpumask_copy(d->domain, vector_cpumask);
 		err = 0;
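
Note: a single IS_ERR_OR_NULL() test can replace the old integer comparison because the companion irq_vectors.h change retypes the per-cpu table to hold descriptor pointers and turns the sentinels into pointer values. A minimal sketch, with names assumed from that companion change rather than shown in this diff:

#define VECTOR_UNUSED		NULL		/* free slot */
#define VECTOR_RETRIGGERED	((void *)~0UL)	/* falls in the ERR_PTR() range */

typedef struct irq_desc *vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);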
@@ -224,15 +223,16 @@ static int assign_irq_vector_policy(int irq, int node,
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-	int cpu, vector;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int cpu, vector;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
@@ -242,12 +242,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		return;
 	}
 
+	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
+			if (per_cpu(vector_irq, cpu)[vector] != desc)
 				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 			break;
 		}
 	}
@@ -296,7 +297,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	struct irq_alloc_info *info = arg;
 	struct apic_chip_data *data;
 	struct irq_data *irq_data;
-	int i, err;
+	int i, err, node;
 
 	if (disable_apic)
 		return -ENXIO;
@@ -308,12 +309,13 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		BUG_ON(!irq_data);
+		node = irq_data_get_node(irq_data);
 #ifdef CONFIG_X86_IO_APIC
 		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
 			data = legacy_irq_data[virq + i];
 		else
 #endif
-			data = alloc_apic_chip_data(irq_data->node);
+			data = alloc_apic_chip_data(node);
 		if (!data) {
 			err = -ENOMEM;
 			goto error;
@@ -322,8 +324,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
-					       info);
+		err = assign_irq_vector_policy(virq + i, node, data, info);
 		if (err)
 			goto error;
 	}
@@ -403,32 +404,32 @@ int __init arch_early_irq_init(void)
 	return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
 	struct apic_chip_data *data;
+	struct irq_desc *desc;
+	int irq, vector;
 
 	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		data = apic_chip_data(irq_get_irq_data(irq));
-		if (!data)
-			continue;
+	for_each_irq_desc(irq, desc) {
+		struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-		if (!cpumask_test_cpu(cpu, data->domain))
+		data = apic_chip_data(idata);
+		if (!data || !cpumask_test_cpu(cpu, data->domain))
 			continue;
 		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
+		per_cpu(vector_irq, cpu)[vector] = desc;
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
+		desc = per_cpu(vector_irq, cpu)[vector];
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		data = apic_chip_data(irq_get_irq_data(irq));
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 	}
 }
 
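The point of storing the descriptor instead of the irq number shows up on the hot path: the interrupt entry code can go straight from vector to descriptor without a per-interrupt irq-number lookup. A sketch of that consumer, assuming the descriptor-based handle_irq() variant from later in this series (not part of this diff):

	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);

	/* IS_ERR_OR_NULL() filters both free and retriggered slots */
	if (!IS_ERR_OR_NULL(desc))
		handle_irq(desc, regs);	/* assumed desc-based variant */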
@@ -448,7 +449,7 @@ void setup_vector_irq(int cpu)
 	 * legacy vector to irq mapping:
 	 */
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
 	__setup_vector_irq(cpu);
 }
@@ -490,7 +491,8 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	if (err) {
 		struct irq_data *top = irq_get_irq_data(irq);
 
-		if (assign_irq_vector(irq, data, top->affinity))
+		if (assign_irq_vector(irq, data,
+				      irq_data_get_affinity_mask(top)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
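The switches from irq_data->node to irq_data_get_node() and from top->affinity to irq_data_get_affinity_mask() track the move of the node and affinity fields from struct irq_data into the shared struct irq_common_data. The accessors are plausibly shaped as below (a sketch; the exact definitions live in include/linux/irq.h):

static inline int irq_data_get_node(struct irq_data *d)
{
	return irq_common_data_get_node(d->common);
}

static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}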
@@ -538,27 +540,30 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 
 	entering_ack_irq();
 
+	/* Prevent vectors vanishing under us */
+	raw_spin_lock(&vector_lock);
+
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
 		struct apic_chip_data *data;
+		struct irq_desc *desc;
+		unsigned int irr;
 
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
+	retry:
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
+		if (!raw_spin_trylock(&desc->lock)) {
+			raw_spin_unlock(&vector_lock);
+			cpu_relax();
+			raw_spin_lock(&vector_lock);
+			goto retry;
+		}
 
-		data = apic_chip_data(&desc->irq_data);
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!data)
-			continue;
-
-		raw_spin_lock(&desc->lock);
+			goto unlock;
 
 		/*
 		 * Check if the irq migration is in progress. If so, we
@@ -583,11 +588,13 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
 
+	raw_spin_unlock(&vector_lock);
+
 	exiting_irq();
 }
 
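Why the trylock dance above: this path takes vector_lock first and desc->lock second, while the allocation and affinity paths nest vector_lock inside an already held desc->lock. Blocking on desc->lock here could therefore deadlock ABBA-style. Distilled to the generic pattern (illustrative only; the real loop holds vector_lock across all vectors):

	/* lock_a stands in for vector_lock, lock_b for desc->lock */
	raw_spin_lock(&lock_a);
retry:
	if (!raw_spin_trylock(&lock_b)) {
		/* back off so the B-then-A holder can finish */
		raw_spin_unlock(&lock_a);
		cpu_relax();
		raw_spin_lock(&lock_a);
		goto retry;
	}
	/* ... both locks held, slot can be inspected and cleared ... */
	raw_spin_unlock(&lock_b);
	raw_spin_unlock(&lock_a);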