@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
 {
 	struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
 
+	/*
+	 * Ignore invalidation callbacks for device private pages since
+	 * the invalidation is handled as part of the migration process.
+	 */
+	if (range->event == MMU_NOTIFY_MIGRATE &&
+	    range->migrate_pgmap_owner == dmirror->mdevice)
+		return true;
+
 	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&dmirror->mutex);
 	else if (!mutex_trylock(&dmirror->mutex))
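The check added above is the consumer side of the pgmap_owner tag set later in this patch: a migration started by this driver raises an MMU_NOTIFY_MIGRATE invalidation carrying the driver's own device in migrate_pgmap_owner, so the callback can return early instead of tearing down mirror entries that the migration itself keeps up to date. A minimal sketch of the same pattern in a hypothetical driver (the my_* names, lock, and helper are assumptions, not part of this patch) could look like:

static bool my_mirror_interval_invalidate(struct mmu_interval_notifier *mni,
					  const struct mmu_notifier_range *range,
					  unsigned long cur_seq)
{
	struct my_mirror *mirror = container_of(mni, struct my_mirror, notifier);

	/* Skip invalidations raised by our own device-private migrations. */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == mirror->device)
		return true;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&mirror->lock);
	else if (!mutex_trylock(&mirror->lock))
		return false;

	/* Record the new notifier sequence and drop mirrored entries in range. */
	mmu_interval_set_seq(mni, cur_seq);
	my_mirror_drop_range(mirror, range->start, range->end);
	mutex_unlock(&mirror->lock);
	return true;
}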
@@ -693,7 +701,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 		args.dst = dst_pfns;
 		args.start = addr;
 		args.end = next;
-		args.pgmap_owner = NULL;
+		args.pgmap_owner = dmirror->mdevice;
 		args.flags = MIGRATE_VMA_SELECT_SYSTEM;
 		ret = migrate_vma_setup(&args);
 		if (ret)
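On the producer side, shown in the hunk above, tagging the migrate_vma with the owning device is what lets the notifier callback recognize its own invalidations. A sketch of a full system-to-device migration sequence under that convention (the my_* names, array sizing, and copy step are assumptions; only the migrate_vma_* API and struct fields are from the kernel) might be:

static int my_migrate_to_device(struct my_mirror *mirror,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				unsigned long *src_pfns, unsigned long *dst_pfns)
{
	struct migrate_vma args = {
		.vma		= vma,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.start		= start,
		.end		= end,
		.pgmap_owner	= mirror->device,	/* matches the notifier check */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	ret = migrate_vma_setup(&args);		/* collect and unmap source pages */
	if (ret)
		return ret;

	/* Allocate device pages, copy the data, and fill dst_pfns here. */
	my_alloc_and_copy_to_device(mirror, &args);

	migrate_vma_pages(&args);		/* install the migrated pages */
	migrate_vma_finalize(&args);		/* restore anything not migrated */
	return 0;
}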
@@ -983,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
 }
 
 static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
-						      struct dmirror_device *mdevice)
+						      struct dmirror *dmirror)
 {
 	const unsigned long *src = args->src;
 	unsigned long *dst = args->dst;
@@ -1005,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 			continue;
 
 		lock_page(dpage);
+		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 		copy_highpage(dpage, spage);
 		*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 		if (*src & MIGRATE_PFN_WRITE)
@@ -1013,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-						  struct dmirror *dmirror)
-{
-	/* Invalidate the device's page table mapping. */
-	mutex_lock(&dmirror->mutex);
-	dmirror_do_update(dmirror, args->start, args->end);
-	mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
 	struct migrate_vma args;
@@ -1051,11 +1051,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 	if (migrate_vma_setup(&args))
 		return VM_FAULT_SIGBUS;
 
-	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
 	if (ret)
 		return ret;
 	migrate_vma_pages(&args);
-	dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+	/*
+	 * No device finalize step is needed since
+	 * dmirror_devmem_fault_alloc_and_copy() will have already
+	 * invalidated the device page table.
+	 */
 	migrate_vma_finalize(&args);
 	return 0;
 }
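Taken together, the last three hunks move the device page table invalidation into the copy loop (the xa_erase() call) so the CPU fault path no longer needs a separate finalize-and-map step. A sketch of the resulting fault-handler shape for a single device-private page (the my_* helpers are assumptions; storing the mirror pointer in vmf->page->zone_device_data follows what this test driver does) could be:

static vm_fault_t my_devmem_fault(struct vm_fault *vmf)
{
	struct my_mirror *mirror = vmf->page->zone_device_data;
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= mirror->device,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	/*
	 * Copy the device page back to a freshly allocated system page and
	 * drop the mirror entry for this address while doing so, as the
	 * xa_erase() above does, so no later device update is required.
	 */
	if (my_copy_to_ram(mirror, &args))
		return VM_FAULT_SIGBUS;

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}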