19
19
20
20
#include "vgic.h"
21
21
22
+ #define CREATE_TRACE_POINTS
23
+ #include "../trace.h"
24
+
25
/*
 * Lock-assertion helper: with CONFIG_DEBUG_SPINLOCK this BUGs when a lock
 * we require to be held is not held; otherwise it compiles away entirely.
 *
 * Note: there must be no space between the macro name and '(p)' — with a
 * space the preprocessor treats this as an object-like macro whose body
 * begins with the literal tokens "(p)", breaking every call site.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
30
+
22
31
struct vgic_global __section (.hyp .text ) kvm_vgic_global_state ;
23
32
33
+ /*
34
+ * Locking order is always:
35
+ * vgic_cpu->ap_list_lock
36
+ * vgic_irq->irq_lock
37
+ *
38
+ * (that is, always take the ap_list_lock before the struct vgic_irq lock).
39
+ *
40
+ * When taking more than one ap_list_lock at the same time, always take the
41
+ * lowest numbered VCPU's ap_list_lock first, so:
42
+ * vcpuX->vcpu_id < vcpuY->vcpu_id:
43
+ * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
44
+ * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
45
+ */
46
+
24
47
struct vgic_irq * vgic_get_irq (struct kvm * kvm , struct kvm_vcpu * vcpu ,
25
48
u32 intid )
26
49
{
@@ -39,3 +62,191 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
39
62
WARN (1 , "Looking up struct vgic_irq for reserved INTID" );
40
63
return NULL ;
41
64
}
65
+
66
+ /**
67
+ * kvm_vgic_target_oracle - compute the target vcpu for an irq
68
+ *
69
+ * @irq: The irq to route. Must be already locked.
70
+ *
71
+ * Based on the current state of the interrupt (enabled, pending,
72
+ * active, vcpu and target_vcpu), compute the next vcpu this should be
73
+ * given to. Return NULL if this shouldn't be injected at all.
74
+ *
75
+ * Requires the IRQ lock to be held.
76
+ */
77
+ static struct kvm_vcpu * vgic_target_oracle (struct vgic_irq * irq )
78
+ {
79
+ DEBUG_SPINLOCK_BUG_ON (!spin_is_locked (& irq -> irq_lock ));
80
+
81
+ /* If the interrupt is active, it must stay on the current vcpu */
82
+ if (irq -> active )
83
+ return irq -> vcpu ? : irq -> target_vcpu ;
84
+
85
+ /*
86
+ * If the IRQ is not active but enabled and pending, we should direct
87
+ * it to its configured target VCPU.
88
+ * If the distributor is disabled, pending interrupts shouldn't be
89
+ * forwarded.
90
+ */
91
+ if (irq -> enabled && irq -> pending ) {
92
+ if (unlikely (irq -> target_vcpu &&
93
+ !irq -> target_vcpu -> kvm -> arch .vgic .enabled ))
94
+ return NULL ;
95
+
96
+ return irq -> target_vcpu ;
97
+ }
98
+
99
+ /* If neither active nor pending and enabled, then this IRQ should not
100
+ * be queued to any VCPU.
101
+ */
102
+ return NULL ;
103
+ }
104
+
105
+ /*
106
+ * Only valid injection if changing level for level-triggered IRQs or for a
107
+ * rising edge.
108
+ */
109
+ static bool vgic_validate_injection (struct vgic_irq * irq , bool level )
110
+ {
111
+ switch (irq -> config ) {
112
+ case VGIC_CONFIG_LEVEL :
113
+ return irq -> line_level != level ;
114
+ case VGIC_CONFIG_EDGE :
115
+ return level ;
116
+ }
117
+
118
+ return false;
119
+ }
120
+
121
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 * (Lock order per the comment at the top of this file:
	 * ap_list_lock must be taken before irq_lock.)
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		/* Retake only the irq lock before retrying, as on entry. */
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/* Commit: link the irq onto this VCPU's ap_list and record owner. */
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	/* Kick after dropping all locks so the target can run immediately. */
	kvm_vcpu_kick(vcpu);

	return true;
}
192
+
193
+ static int vgic_update_irq_pending (struct kvm * kvm , int cpuid ,
194
+ unsigned int intid , bool level ,
195
+ bool mapped_irq )
196
+ {
197
+ struct kvm_vcpu * vcpu ;
198
+ struct vgic_irq * irq ;
199
+ int ret ;
200
+
201
+ trace_vgic_update_irq_pending (cpuid , intid , level );
202
+
203
+ vcpu = kvm_get_vcpu (kvm , cpuid );
204
+ if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS )
205
+ return - EINVAL ;
206
+
207
+ irq = vgic_get_irq (kvm , vcpu , intid );
208
+ if (!irq )
209
+ return - EINVAL ;
210
+
211
+ if (irq -> hw != mapped_irq )
212
+ return - EINVAL ;
213
+
214
+ spin_lock (& irq -> irq_lock );
215
+
216
+ if (!vgic_validate_injection (irq , level )) {
217
+ /* Nothing to see here, move along... */
218
+ spin_unlock (& irq -> irq_lock );
219
+ return 0 ;
220
+ }
221
+
222
+ if (irq -> config == VGIC_CONFIG_LEVEL ) {
223
+ irq -> line_level = level ;
224
+ irq -> pending = level || irq -> soft_pending ;
225
+ } else {
226
+ irq -> pending = true;
227
+ }
228
+
229
+ vgic_queue_irq_unlock (kvm , irq );
230
+
231
+ return 0 ;
232
+ }
233
+
234
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:      The VM structure pointer
 * @cpuid:    The CPU for PPIs
 * @intid:    The INTID to inject a new state to.
 * @level:    Edge-triggered:  true:  to trigger the interrupt
 *                             false: to ignore the call
 *            Level-sensitive  true:  raise the input signal
 *                             false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 *
 * Return: 0 on success, -EINVAL on an invalid cpuid or intid.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	/* Non-mapped (purely virtual) injection path: mapped_irq = false. */
	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}
0 commit comments