@@ -100,6 +100,24 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 	*need_flush = true;
 }
 
+static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
+{
+	unsigned long new_mm_cr3;
+
+	if (need_flush) {
+		new_mm_cr3 = build_cr3(pgdir, new_asid);
+	} else {
+		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
+	}
+
+	/*
+	 * Caution: many callers of this function expect
+	 * that load_cr3() is serializing and orders TLB
+	 * fills with respect to the mm_cpumask writes.
+	 */
+	write_cr3(new_mm_cr3);
+}
+
 void leave_mm(int cpu)
 {
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -230,7 +248,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	if (need_flush) {
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-		write_cr3(build_cr3(next->pgd, new_asid));
+		load_new_mm_cr3(next->pgd, new_asid, true);
 
 		/*
 		 * NB: This gets called via leave_mm() in the idle path
@@ -243,7 +261,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
-		write_cr3(build_cr3_noflush(next->pgd, new_asid));
+		load_new_mm_cr3(next->pgd, new_asid, false);
 
 		/* See above wrt _rcuidle. */
 		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
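
For readers outside the kernel tree: the need_flush argument decides whether the CR3 write also invalidates the TLB entries tagged with the target PCID. The stand-alone sketch below illustrates that distinction under the assumption that build_cr3()/build_cr3_noflush() amount to "page-table base | PCID", with CR3 bit 63 requesting a non-flushing switch; the sketch_* names, the CR3_NOFLUSH constant, and the example physical address are hypothetical illustrations, not the kernel's actual helpers.

/*
 * Minimal user-space sketch of the CR3-value construction that
 * load_new_mm_cr3() relies on. Assumption: with PCIDs enabled,
 * CR3 bits 11:0 carry the PCID, and setting bit 63 on a CR3
 * write asks the CPU not to flush TLB entries for that PCID.
 */
#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH (1ULL << 63)	/* assumed "don't flush this PCID" bit */

/* assumed helper shape: page-table base plus ASID-derived PCID */
static uint64_t sketch_build_cr3(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | (uint64_t)(asid + 1);	/* PCID 0 kept reserved */
}

static uint64_t sketch_build_cr3_noflush(uint64_t pgd_pa, uint16_t asid)
{
	return sketch_build_cr3(pgd_pa, asid) | CR3_NOFLUSH;
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* hypothetical page-aligned base */

	printf("flush:   %#llx\n",
	       (unsigned long long)sketch_build_cr3(pgd_pa, 1));
	printf("noflush: %#llx\n",
	       (unsigned long long)sketch_build_cr3_noflush(pgd_pa, 1));
	return 0;
}

On hardware without PCID support, CR3 bit 63 is reserved and every CR3 write flushes non-global TLB entries, which is consistent with the patch taking the noflush path only when the ASID's cached state is already up to date.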