@@ -196,21 +196,82 @@ static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid
196
196
trace_tlbie (lpid , 0 , rb , rs , ric , prs , r );
197
197
}
198
198
199
- static inline void fixup_tlbie (void )
199
+
200
+ static inline void fixup_tlbie_va (unsigned long va , unsigned long pid ,
201
+ unsigned long ap )
202
+ {
203
+ if (cpu_has_feature (CPU_FTR_P9_TLBIE_ERAT_BUG )) {
204
+ asm volatile ("ptesync" : : :"memory" );
205
+ __tlbie_va (va , 0 , ap , RIC_FLUSH_TLB );
206
+ }
207
+
208
+ if (cpu_has_feature (CPU_FTR_P9_TLBIE_STQ_BUG )) {
209
+ asm volatile ("ptesync" : : :"memory" );
210
+ __tlbie_va (va , pid , ap , RIC_FLUSH_TLB );
211
+ }
212
+ }
213
+
214
/*
 * Fixup after a range of per-VA tlbies.  Unlike fixup_tlbie_va(), the
 * ERAT-bug path here flushes all of pid 0's TLB entries rather than a
 * single VA; the STQ-bug path repeats the invalidation for the one VA
 * the caller passes in (the last address of the flushed range at the
 * call site in __tlbie_va_range()).
 */
static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
					unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Order the preceding tlbie sequence before the fixup. */
		asm volatile("ptesync" : : : "memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync" : : : "memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}
227
+
228
/*
 * Fixup after a whole-PID tlbie (see callers in _tlbie_pid()).  The
 * STQ-bug path needs some VA to invalidate, so a throwaway address is
 * used; the ERAT-bug path flushes pid 0's TLB entries.
 */
static inline void fixup_tlbie_pid(unsigned long pid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Order the preceding tlbie before the fixup invalidation. */
		asm volatile("ptesync" : : : "memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync" : : : "memory");
		/* 64K page size is used here regardless of the flushed size. */
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}
209
246
247
+
248
/*
 * LPID-scoped counterpart of fixup_tlbie_va(): re-issue a single-VA
 * guest/partition-scoped invalidation after a tlbie for (va, lpid, ap).
 */
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
				       unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Order the preceding tlbie before the fixup invalidation. */
		asm volatile("ptesync" : : : "memory");
		/* Repeat the invalidation for the same VA but with lpid 0. */
		__tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync" : : : "memory");
		/* Repeat the exact same invalidation (same va/lpid/ap). */
		__tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
	}
}
261
+
210
262
static inline void fixup_tlbie_lpid (unsigned long lpid )
211
263
{
264
+ /*
265
+ * We can use any address for the invalidation, pick one which is
266
+ * probably unused as an optimisation.
267
+ */
212
268
unsigned long va = ((1UL << 52 ) - 1 );
213
269
270
+ if (cpu_has_feature (CPU_FTR_P9_TLBIE_ERAT_BUG )) {
271
+ asm volatile ("ptesync" : : :"memory" );
272
+ __tlbie_lpid (0 , RIC_FLUSH_TLB );
273
+ }
274
+
214
275
if (cpu_has_feature (CPU_FTR_P9_TLBIE_STQ_BUG )) {
215
276
asm volatile ("ptesync" : : :"memory" );
216
277
__tlbie_lpid_va (va , lpid , mmu_get_ap (MMU_PAGE_64K ), RIC_FLUSH_TLB );
@@ -258,15 +319,16 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
258
319
switch (ric ) {
259
320
case RIC_FLUSH_TLB :
260
321
__tlbie_pid (pid , RIC_FLUSH_TLB );
322
+ fixup_tlbie_pid (pid );
261
323
break ;
262
324
case RIC_FLUSH_PWC :
263
325
__tlbie_pid (pid , RIC_FLUSH_PWC );
264
326
break ;
265
327
case RIC_FLUSH_ALL :
266
328
default :
267
329
__tlbie_pid (pid , RIC_FLUSH_ALL );
330
+ fixup_tlbie_pid (pid );
268
331
}
269
- fixup_tlbie ();
270
332
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
271
333
}
272
334
@@ -315,15 +377,16 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
315
377
switch (ric ) {
316
378
case RIC_FLUSH_TLB :
317
379
__tlbie_lpid (lpid , RIC_FLUSH_TLB );
380
+ fixup_tlbie_lpid (lpid );
318
381
break ;
319
382
case RIC_FLUSH_PWC :
320
383
__tlbie_lpid (lpid , RIC_FLUSH_PWC );
321
384
break ;
322
385
case RIC_FLUSH_ALL :
323
386
default :
324
387
__tlbie_lpid (lpid , RIC_FLUSH_ALL );
388
+ fixup_tlbie_lpid (lpid );
325
389
}
326
- fixup_tlbie_lpid (lpid );
327
390
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
328
391
}
329
392
@@ -390,6 +453,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
390
453
391
454
for (addr = start ; addr < end ; addr += page_size )
392
455
__tlbie_va (addr , pid , ap , RIC_FLUSH_TLB );
456
+
457
+ fixup_tlbie_va_range (addr - page_size , pid , ap );
393
458
}
394
459
395
460
static __always_inline void _tlbie_va (unsigned long va , unsigned long pid ,
@@ -399,7 +464,7 @@ static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
399
464
400
465
asm volatile ("ptesync" : : :"memory" );
401
466
__tlbie_va (va , pid , ap , ric );
402
- fixup_tlbie ( );
467
+ fixup_tlbie_va ( va , pid , ap );
403
468
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
404
469
}
405
470
@@ -457,7 +522,7 @@ static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
457
522
458
523
asm volatile ("ptesync" : : :"memory" );
459
524
__tlbie_lpid_va (va , lpid , ap , ric );
460
- fixup_tlbie_lpid ( lpid );
525
+ fixup_tlbie_lpid_va ( va , lpid , ap );
461
526
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
462
527
}
463
528
@@ -469,7 +534,6 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
469
534
if (also_pwc )
470
535
__tlbie_pid (pid , RIC_FLUSH_PWC );
471
536
__tlbie_va_range (start , end , pid , page_size , psize );
472
- fixup_tlbie ();
473
537
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
474
538
}
475
539
@@ -856,7 +920,7 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
856
920
if (gflush )
857
921
__tlbie_va_range (gstart , gend , pid ,
858
922
PUD_SIZE , MMU_PAGE_1G );
859
- fixup_tlbie ();
923
+
860
924
asm volatile ("eieio; tlbsync; ptesync" : : :"memory" );
861
925
} else {
862
926
_tlbiel_va_range_multicast (mm ,
0 commit comments