 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
 
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
+static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -71,19 +74,45 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return this_cpu_ptr(&cpu_hw_events);
 }
 
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+	disable_percpu_irq(irq);
+}
+
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 {
 	int i, irq, irqs;
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		free_percpu_irq(irq, &percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, cpu_pmu);
+		}
 	}
 }
 
@@ -101,33 +130,44 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		return -ENODEV;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				    irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handler,
-				  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-				  cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 			       irq);
 			return err;
 		}
-
-		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					   irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  cpu_pmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				       irq);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		}
 	}
 
 	return 0;
@@ -141,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
+		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
 
 	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
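
Note on the per-CPU path above: request_percpu_irq() and free_percpu_irq() are passed &percpu_pmu as the dev_id, so what the interrupt handler receives is the address of the current CPU's percpu_pmu slot, not the struct arm_pmu pointer itself. Below is a minimal sketch of a dispatcher that copes with both request paths; the function name armpmu_dispatch_irq and the handle_irq invocation are illustrative assumptions, not part of this hunk.

/*
 * Illustrative sketch (not part of this patch): for a per-CPU IRQ the
 * dev_id points at this CPU's percpu_pmu slot and must be dereferenced;
 * for a regular IRQ it is already the struct arm_pmu pointer.
 */
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;

	if (irq_is_percpu(irq))
		dev = *(void **)dev;	/* per-CPU slot -> struct arm_pmu * */
	armpmu = dev;

	return armpmu->handle_irq(irq, armpmu);
}

This is also why cpu_pmu_init() writes cpu_pmu into percpu_pmu for every possible CPU, so the slot is valid on each CPU by the time the per-CPU interrupt is enabled via on_each_cpu().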