  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/list.h>
 #include <linux/smp.h>
 #include <linux/cpu_pm.h>
 #include <linux/cpumask.h>
 #include <linux/io.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
@@ -75,16 +77,15 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d)
 
 static inline unsigned int gic_irq(struct irq_data *d)
 {
-        struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
-        return d->irq - gic_data->irq_offset;
+        return d->hwirq;
 }
 
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
 static void gic_mask_irq(struct irq_data *d)
 {
-        u32 mask = 1 << (d->irq % 32);
+        u32 mask = 1 << (gic_irq(d) % 32);
 
         spin_lock(&irq_controller_lock);
         writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
@@ -95,7 +96,7 @@ static void gic_mask_irq(struct irq_data *d)
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-        u32 mask = 1 << (d->irq % 32);
+        u32 mask = 1 << (gic_irq(d) % 32);
 
         spin_lock(&irq_controller_lock);
         if (gic_arch_extn.irq_unmask)
@@ -176,7 +177,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                             bool force)
 {
         void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-        unsigned int shift = (d->irq % 4) * 8;
+        unsigned int shift = (gic_irq(d) % 4) * 8;
         unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
         u32 val, mask, bit;
 
@@ -227,7 +228,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
         if (gic_irq == 1023)
                 goto out;
 
-        cascade_irq = gic_irq + chip_data->irq_offset;
+        cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq);
         if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
                 do_bad_IRQ(cascade_irq, desc);
         else
@@ -259,14 +260,14 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
         irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
-static void __init gic_dist_init(struct gic_chip_data *gic,
-        unsigned int irq_start)
+static void __init gic_dist_init(struct gic_chip_data *gic)
 {
-        unsigned int gic_irqs, irq_limit, i;
+        unsigned int i, irq;
         u32 cpumask;
+        unsigned int gic_irqs = gic->gic_irqs;
+        struct irq_domain *domain = &gic->domain;
         void __iomem *base = gic->dist_base;
         u32 cpu = 0;
-        u32 nrppis = 0, ppi_base = 0;
 
 #ifdef CONFIG_SMP
         cpu = cpu_logical_map(smp_processor_id());
@@ -278,34 +279,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 
         writel_relaxed(0, base + GIC_DIST_CTRL);
 
-        /*
-         * Find out how many interrupts are supported.
-         * The GIC only supports up to 1020 interrupt sources.
-         */
-        gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
-        gic_irqs = (gic_irqs + 1) * 32;
-        if (gic_irqs > 1020)
-                gic_irqs = 1020;
-
-        gic->gic_irqs = gic_irqs;
-
-        /*
-         * Nobody would be insane enough to use PPIs on a secondary
-         * GIC, right?
-         */
-        if (gic == &gic_data[0]) {
-                nrppis = (32 - irq_start) & 31;
-
-                /* The GIC only supports up to 16 PPIs. */
-                if (nrppis > 16)
-                        BUG();
-
-                ppi_base = gic->irq_offset + 32 - nrppis;
-        }
-
-        pr_info("Configuring GIC with %d sources (%d PPIs)\n",
-                gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
-
         /*
          * Set all global interrupts to be level triggered, active low.
          */
@@ -331,30 +304,21 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
         for (i = 32; i < gic_irqs; i += 32)
                 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
-        /*
-         * Limit number of interrupts registered to the platform maximum
-         */
-        irq_limit = gic->irq_offset + gic_irqs;
-        if (WARN_ON(irq_limit > NR_IRQS))
-                irq_limit = NR_IRQS;
-
         /*
          * Setup the Linux IRQ subsystem.
          */
-        for (i = 0; i < nrppis; i++) {
-                int ppi = i + ppi_base;
-
-                irq_set_percpu_devid(ppi);
-                irq_set_chip_and_handler(ppi, &gic_chip,
-                                         handle_percpu_devid_irq);
-                irq_set_chip_data(ppi, gic);
-                set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
-        }
-
-        for (i = irq_start + nrppis; i < irq_limit; i++) {
-                irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
-                irq_set_chip_data(i, gic);
-                set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+        irq_domain_for_each_irq(domain, i, irq) {
+                if (i < 32) {
+                        irq_set_percpu_devid(irq);
+                        irq_set_chip_and_handler(irq, &gic_chip,
+                                                 handle_percpu_devid_irq);
+                        set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+                } else {
+                        irq_set_chip_and_handler(irq, &gic_chip,
+                                                 handle_fasteoi_irq);
+                        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+                }
+                irq_set_chip_data(irq, gic);
         }
 
         writel_relaxed(1, base + GIC_DIST_CTRL);
@@ -566,23 +530,53 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 }
 #endif
 
+const struct irq_domain_ops gic_irq_domain_ops = {
+};
+
 void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
         void __iomem *dist_base, void __iomem *cpu_base)
 {
         struct gic_chip_data *gic;
+        struct irq_domain *domain;
+        int gic_irqs;
 
         BUG_ON(gic_nr >= MAX_GIC_NR);
 
         gic = &gic_data[gic_nr];
+        domain = &gic->domain;
         gic->dist_base = dist_base;
         gic->cpu_base = cpu_base;
-        gic->irq_offset = (irq_start - 1) & ~31;
 
-        if (gic_nr == 0)
+        /*
+         * For primary GICs, skip over SGIs.
+         * For secondary GICs, skip over PPIs, too.
+         */
+        if (gic_nr == 0) {
                 gic_cpu_base_addr = cpu_base;
+                domain->hwirq_base = 16;
+                irq_start = (irq_start & ~31) + 16;
+        } else
+                domain->hwirq_base = 32;
+
+        /*
+         * Find out how many interrupts are supported.
+         * The GIC only supports up to 1020 interrupt sources.
+         */
+        gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
+        gic_irqs = (gic_irqs + 1) * 32;
+        if (gic_irqs > 1020)
+                gic_irqs = 1020;
+        gic->gic_irqs = gic_irqs;
+
+        domain->nr_irq = gic_irqs - domain->hwirq_base;
+        domain->irq_base = irq_alloc_descs(-1, irq_start, domain->nr_irq,
+                                           numa_node_id());
+        domain->priv = gic;
+        domain->ops = &gic_irq_domain_ops;
+        irq_domain_add(domain);
 
         gic_chip.flags |= gic_arch_extn.flags;
-        gic_dist_init(gic, irq_start);
+        gic_dist_init(gic);
         gic_cpu_init(gic);
         gic_pm_init(gic);
 }
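
For context, a minimal standalone sketch (not part of the patch) of the offset bookkeeping that this conversion moves into the legacy struct irq_domain fields used above (hwirq_base, irq_base, nr_irq). The structure and helper names below are hypothetical illustrations of a linear hardware-IRQ-to-Linux-IRQ mapping, not the kernel's irq_domain_to_irq() implementation.

#include <stdio.h>

/* Hypothetical stand-in for the legacy irq_domain fields used in this
 * patch: hwirq_base is the first hardware IRQ the domain handles (16 on
 * the primary GIC, skipping SGIs; 32 on a secondary, skipping PPIs too),
 * irq_base is the first Linux IRQ descriptor allocated for the domain,
 * and nr_irq is how many hardware IRQs it covers. */
struct fake_gic_domain {
        unsigned int hwirq_base;
        unsigned int irq_base;
        unsigned int nr_irq;
};

/* Hypothetical linear translation from a GIC hardware IRQ number to a
 * Linux IRQ number; illustrative only. */
static unsigned int gic_hwirq_to_linux_irq(const struct fake_gic_domain *d,
                                           unsigned int hwirq)
{
        return d->irq_base + (hwirq - d->hwirq_base);
}

int main(void)
{
        /* Example: primary GIC with Linux IRQ descriptors allocated from 16. */
        struct fake_gic_domain d = { .hwirq_base = 16, .irq_base = 16, .nr_irq = 144 };

        printf("hwirq 16 -> Linux IRQ %u\n", gic_hwirq_to_linux_irq(&d, 16));
        printf("hwirq 34 -> Linux IRQ %u\n", gic_hwirq_to_linux_irq(&d, 34));
        return 0;
}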