@@ -217,6 +217,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
217
217
#define replay_soft_interrupts_irqrestore () replay_soft_interrupts()
218
218
#endif
219
219
220
#ifdef CONFIG_CC_HAS_ASM_GOTO
/*
 * Restore the soft interrupt mask state.
 *
 * @mask: new soft-mask value; non-zero means "disable" (just record it),
 *        zero (IRQS_ENABLED) means re-enable and replay anything pending.
 *
 * Fast path: with asm goto, the "check irq_happened, then clear the soft
 * mask" pair can be made atomic with respect to soft-masked interrupts via
 * the restart table, so the common re-enable case needs neither a hard
 * disable nor a branch out of line.
 */
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value if it is a disable */
	if (mask) {
		irq_soft_mask_set(mask);
		return;
	}

	/*
	 * After the stb, interrupts are unmasked and there are no interrupts
	 * pending replay. The restart sequence makes this atomic with
	 * respect to soft-masked interrupts. If this was just a simple code
	 * sequence, a soft-masked interrupt could become pending right after
	 * the comparison and before the stb.
	 *
	 * This allows interrupts to be unmasked without hard disabling, and
	 * also without new hard interrupts coming in ahead of pending ones.
	 *
	 * Sequence: load paca->irq_happened (%0 offset off r13, the paca
	 * pointer by powerpc convention) into r9; if non-zero, take the slow
	 * path at "happened"; otherwise r9 is 0, so storing it to
	 * paca->irq_soft_mask (%1 offset) sets IRQS_ENABLED.  The
	 * RESTART_TABLE entry restarts the whole 1b..2b window at 1b if a
	 * soft-masked interrupt hits in the middle.
	 */
	asm_volatile_goto(
"1:					\n"
"		lbz	9,%0(13)	\n"
"		cmpwi	9,0		\n"
"		bne	%l[happened]	\n"
"		stb	9,%1(13)	\n"
"2:					\n"
		RESTART_TABLE(1b, 2b, 1b)
	: : "i" (offsetof(struct paca_struct, irq_happened)),
	    "i" (offsetof(struct paca_struct, irq_soft_mask))
	: "cr0", "r9"
	: happened);

	/* Fast path succeeded: EE should already be on here. */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!(mfmsr() & MSR_EE));

	return;

happened:
	/* Slow path: something was latched in irq_happened while masked. */
	irq_happened = get_irq_happened();
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(!irq_happened);

	/*
	 * Only the hard-disable flag is set, nothing to replay: just clear
	 * the state and turn EE back on.
	 */
	if (irq_happened == PACA_IRQ_HARD_DIS) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(mfmsr() & MSR_EE);
		irq_soft_mask_set(IRQS_ENABLED);
		local_paca->irq_happened = 0;
		__hard_irq_enable();
		return;
	}

	/* Have interrupts to replay, need to hard disable first */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (!(mfmsr() & MSR_EE)) {
				/*
				 * An interrupt could have come in and cleared
				 * MSR[EE] and set IRQ_HARD_DIS, so check
				 * IRQ_HARD_DIS again and warn if it is still
				 * clear.
				 */
				irq_happened = get_irq_happened();
				WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
			}
		}
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		/* Already hard disabled; EE being on here would be a bug. */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/* Replay pending interrupts, then clear the latched state. */
	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	__hard_irq_enable();
	preempt_enable();
}
313
+ #else
220
314
notrace void arch_local_irq_restore (unsigned long mask )
221
315
{
222
316
unsigned char irq_happened ;
@@ -288,6 +382,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
288
382
__hard_irq_enable ();
289
383
preempt_enable ();
290
384
}
385
+ #endif
291
386
EXPORT_SYMBOL (arch_local_irq_restore );
292
387
293
388
/*
0 commit comments