@@ -36,6 +36,7 @@ static int nr_pics;
 
 struct eiointc_priv {
 	u32			node;
+	u32			vec_count;
 	nodemask_t		node_map;
 	cpumask_t		cpuspan_map;
 	struct fwnode_handle	*domain_handle;
@@ -153,18 +154,18 @@ static int eiointc_router_init(unsigned int cpu)
 	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
 		eiointc_enable();
 
-		for (i = 0; i < VEC_COUNT / 32; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
 			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
 			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
 			bit = BIT(1 + index); /* Route to IP[1 + index] */
 			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
 			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 4; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
 			/* Route to Node-0 Core-0 */
 			if (index == 0)
 				bit = BIT(cpu_logical_map(0));
@@ -175,7 +176,7 @@ static int eiointc_router_init(unsigned int cpu)
 			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
 		}
 
-		for (i = 0; i < VEC_COUNT / 32; i++) {
+		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
 			data = 0xffffffff;
 			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
 			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
@@ -195,7 +196,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	for (i = 0; i < VEC_REG_COUNT; i++) {
+	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
 		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
 		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
 		while (pending) {
@@ -310,11 +311,11 @@ static void eiointc_resume(void)
 	eiointc_router_init(0);
 
 	for (i = 0; i < nr_pics; i++) {
-		for (j = 0; j < VEC_COUNT; j++) {
+		for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
 			desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
 			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
 				raw_spin_lock(&desc->lock);
-				irq_data = &desc->irq_data;
+				irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
 				eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
 				raw_spin_unlock(&desc->lock);
 			}
@@ -375,11 +376,47 @@ static int __init acpi_cascade_irqdomain_init(void)
 	return 0;
 }
 
+static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
+			       u64 node_map)
+{
+	int i;
+
+	node_map = node_map ? node_map : -1ULL;
+	for_each_possible_cpu(i) {
+		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
+			node_set(cpu_to_eio_node(i), priv->node_map);
+			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
+				   cpumask_of(i));
+		}
+	}
+
+	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
+							priv->vec_count,
+							&eiointc_domain_ops,
+							priv);
+	if (!priv->eiointc_domain) {
+		pr_err("loongson-extioi: cannot add IRQ domain\n");
+		return -ENOMEM;
+	}
+
+	eiointc_priv[nr_pics++] = priv;
+	eiointc_router_init(0);
+	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+
+	if (nr_pics == 1) {
+		register_syscore_ops(&eiointc_syscore_ops);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+					  "irqchip/loongarch/intc:starting",
+					  eiointc_router_init, NULL);
+	}
+
+	return 0;
+}
+
 int __init eiointc_acpi_init(struct irq_domain *parent,
 			     struct acpi_madt_eio_pic *acpi_eiointc)
 {
-	int i, ret, parent_irq;
-	unsigned long node_map;
+	int parent_irq, ret;
 	struct eiointc_priv *priv;
 	int node;
 
@@ -394,45 +431,25 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
 		goto out_free_priv;
 	}
 
+	priv->vec_count = VEC_COUNT;
 	priv->node = acpi_eiointc->node;
-	node_map = acpi_eiointc->node_map ? : -1ULL;
-
-	for_each_possible_cpu(i) {
-		if (node_map & (1ULL << cpu_to_eio_node(i))) {
-			node_set(cpu_to_eio_node(i), priv->node_map);
-			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
-		}
-	}
-
-	/* Setup IRQ domain */
-	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
-					&eiointc_domain_ops, priv);
-	if (!priv->eiointc_domain) {
-		pr_err("loongson-eiointc: cannot add IRQ domain\n");
-		goto out_free_handle;
-	}
-
-	eiointc_priv[nr_pics++] = priv;
-
-	eiointc_router_init(0);
 
 	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
-	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
 
-	if (nr_pics == 1) {
-		register_syscore_ops(&eiointc_syscore_ops);
-		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
-					  "irqchip/loongarch/intc:starting",
-					  eiointc_router_init, NULL);
-	}
+	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
+	if (ret < 0)
+		goto out_free_handle;
 
 	if (cpu_has_flatmode)
 		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
 	else
 		node = acpi_eiointc->node;
 	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
 	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+
 	ret = acpi_cascade_irqdomain_init();
+	if (ret < 0)
+		goto out_free_handle;
 
 	return ret;
 
@@ -444,3 +461,49 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
 
 	return -ENOMEM;
 }
+
+static int __init eiointc_of_init(struct device_node *of_node,
+				  struct device_node *parent)
+{
+	int parent_irq, ret;
+	struct eiointc_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	parent_irq = irq_of_parse_and_map(of_node, 0);
+	if (parent_irq <= 0) {
+		ret = -ENODEV;
+		goto out_free_priv;
+	}
+
+	ret = irq_set_handler_data(parent_irq, priv);
+	if (ret < 0)
+		goto out_free_priv;
+
+	/*
+	 * In particular, the number of devices supported by the LS2K0500
+	 * extended I/O interrupt vector is 128.
+	 */
+	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
+		priv->vec_count = 128;
+	else
+		priv->vec_count = VEC_COUNT;
+
+	priv->node = 0;
+	priv->domain_handle = of_node_to_fwnode(of_node);
+
+	ret = eiointc_init(priv, parent_irq, 0);
+	if (ret < 0)
+		goto out_free_priv;
+
+	return 0;
+
+out_free_priv:
+	kfree(priv);
+	return ret;
+}
+
+IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
+IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);