@@ -193,82 +193,62 @@ void putback_movable_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+static int remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                 unsigned long addr, void *old)
 {
-        struct mm_struct *mm = vma->vm_mm;
+        struct page_vma_mapped_walk pvmw = {
+                .page = old,
+                .vma = vma,
+                .address = addr,
+                .flags = PVMW_SYNC | PVMW_MIGRATION,
+        };
+        struct page *new;
+        pte_t pte;
         swp_entry_t entry;
-        pmd_t *pmd;
-        pte_t *ptep, pte;
-        spinlock_t *ptl;
 
-        if (unlikely(PageHuge(new))) {
-                ptep = huge_pte_offset(mm, addr);
-                if (!ptep)
-                        goto out;
-                ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
-        } else {
-                pmd = mm_find_pmd(mm, addr);
-                if (!pmd)
-                        goto out;
+        VM_BUG_ON_PAGE(PageTail(page), page);
+        while (page_vma_mapped_walk(&pvmw)) {
+                new = page - pvmw.page->index +
+                        linear_page_index(vma, pvmw.address);
 
-                ptep = pte_offset_map(pmd, addr);
+                get_page(new);
+                pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+                if (pte_swp_soft_dirty(*pvmw.pte))
+                        pte = pte_mksoft_dirty(pte);
 
                 /*
-                 * Peek to check is_swap_pte() before taking ptlock? No, we
-                 * can race mremap's move_ptes(), which skips anon_vma lock.
+                 * Recheck VMA as permissions can change since migration started
                  */
-
-                ptl = pte_lockptr(mm, pmd);
-        }
-
-        spin_lock(ptl);
-        pte = *ptep;
-        if (!is_swap_pte(pte))
-                goto unlock;
-
-        entry = pte_to_swp_entry(pte);
-
-        if (!is_migration_entry(entry) ||
-            migration_entry_to_page(entry) != old)
-                goto unlock;
-
-        get_page(new);
-        pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
-        if (pte_swp_soft_dirty(*ptep))
-                pte = pte_mksoft_dirty(pte);
-
-        /* Recheck VMA as permissions can change since migration started */
-        if (is_write_migration_entry(entry))
-                pte = maybe_mkwrite(pte, vma);
+                entry = pte_to_swp_entry(*pvmw.pte);
+                if (is_write_migration_entry(entry))
+                        pte = maybe_mkwrite(pte, vma);
 
 #ifdef CONFIG_HUGETLB_PAGE
-        if (PageHuge(new)) {
-                pte = pte_mkhuge(pte);
-                pte = arch_make_huge_pte(pte, vma, new, 0);
-        }
+                if (PageHuge(new)) {
+                        pte = pte_mkhuge(pte);
+                        pte = arch_make_huge_pte(pte, vma, new, 0);
+                }
 #endif
-        flush_dcache_page(new);
-        set_pte_at(mm, addr, ptep, pte);
+                flush_dcache_page(new);
+                set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 
-        if (PageHuge(new)) {
-                if (PageAnon(new))
-                        hugepage_add_anon_rmap(new, vma, addr);
+                if (PageHuge(new)) {
+                        if (PageAnon(new))
+                                hugepage_add_anon_rmap(new, vma, pvmw.address);
+                        else
+                                page_dup_rmap(new, true);
+                } else if (PageAnon(new))
+                        page_add_anon_rmap(new, vma, pvmw.address, false);
                 else
-                        page_dup_rmap(new, true);
-        } else if (PageAnon(new))
-                page_add_anon_rmap(new, vma, addr, false);
-        else
-                page_add_file_rmap(new, false);
+                        page_add_file_rmap(new, false);
 
-        if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
-                mlock_vma_page(new);
+                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
+                        mlock_vma_page(new);
+
+                /* No need to invalidate - it was non-present before */
+                update_mmu_cache(vma, pvmw.address, pvmw.pte);
+        }
 
-        /* No need to invalidate - it was non-present before */
-        update_mmu_cache(vma, addr, ptep);
-unlock:
-        pte_unmap_unlock(ptep, ptl);
-out:
         return SWAP_AGAIN;
 }
 
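The conversion above replaces the open-coded page-table lookup (pmd/pte walk, is_swap_pte() and migration-entry checks under the pte lock, plus the hugetlb special case) with the page_vma_mapped_walk() helper, which finds every place the old page is mapped in the VMA and returns with the right page-table lock already held. The sketch below is a minimal illustration of that caller pattern as used by this patch; it is kernel-internal code (not buildable standalone), and the wrapper name for_each_mapping_of_page() and the pr_debug() body are illustrative assumptions, not part of the patch.

#include <linux/mm.h>
#include <linux/rmap.h>		/* struct page_vma_mapped_walk, PVMW_* flags */

/*
 * Illustrative only: visit every place @page is mapped into @vma, starting
 * at @address.  PVMW_MIGRATION restricts the walk to migration entries (as
 * remove_migration_pte() needs); PVMW_SYNC makes the walk wait for the page
 * table lock rather than skip over busy entries.
 */
static void for_each_mapping_of_page(struct page *page,
				     struct vm_area_struct *vma,
				     unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};

	/*
	 * Each true return leaves pvmw.pte (or pvmw.pmd for a huge mapping)
	 * pointing at a mapped entry, with pvmw.address set to the user
	 * address and the matching page-table lock held; the next call drops
	 * that lock and advances.  A caller that breaks out of the loop early
	 * must call page_vma_mapped_walk_done(&pvmw) to release the lock.
	 */
	while (page_vma_mapped_walk(&pvmw))
		pr_debug("%s: mapping at %#lx\n", __func__, pvmw.address);
}

One subtlety visible in the diff itself: a compound page can be mapped by several ptes, so each iteration recomputes the subpage of the new page that corresponds to the address just found (new = page - pvmw.page->index + linear_page_index(vma, pvmw.address)), ensuring set_pte_at() and the rmap calls always operate on the subpage that belongs at pvmw.address.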