Skip to content

Commit fffe3ae

Browse files
committed
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull hmm updates from Jason Gunthorpe:
 "Ralph has been working on nouveau's use of hmm_range_fault() and
  migrate_vma() which resulted in this small series. It adds reporting
  of the page table order from hmm_range_fault() and some optimization
  of migrate_vma():

   - Report the size of the page table mapping out of hmm_range_fault().
     This makes it easier to establish a large/huge/etc mapping in the
     device's page table.

   - Allow devices to ignore the invalidations during migration in cases
     where the migration is not going to change pages. For instance
     migrating pages to a device does not require the device to
     invalidate pages already in the device.

   - Update nouveau and hmm_tests to use the above"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  mm/hmm/test: use the new migration invalidation
  nouveau/svm: use the new migration invalidation
  mm/notifier: add migration invalidation type
  mm/migrate: add a flags parameter to migrate_vma
  nouveau: fix storing invalid ptes
  nouveau/hmm: support mapping large sysmem pages
  nouveau: fix mapping 2MB sysmem pages
  nouveau/hmm: fault one page at a time
  mm/hmm: add tests for hmm_pfn_to_map_order()
  mm/hmm: provide the page mapping order in hmm_range_fault()
2 parents 8f7be62 + 7d17e83 commit fffe3ae

File tree

14 files changed

+410
-200
lines changed

14 files changed

+410
-200
lines changed

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -400,6 +400,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
400400
mig.end = end;
401401
mig.src = &src_pfn;
402402
mig.dst = &dst_pfn;
403+
mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
403404

404405
/*
405406
* We come here with mmap_lock write lock held just for
@@ -577,7 +578,8 @@ kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
577578
mig.end = end;
578579
mig.src = &src_pfn;
579580
mig.dst = &dst_pfn;
580-
mig.src_owner = &kvmppc_uvmem_pgmap;
581+
mig.pgmap_owner = &kvmppc_uvmem_pgmap;
582+
mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
581583

582584
mutex_lock(&kvm->arch.uvmem_lock);
583585
/* The requested page is already paged-out, nothing to do */

drivers/gpu/drm/nouveau/nouveau_dmem.c

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -140,6 +140,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
140140
{
141141
struct device *dev = drm->dev->dev;
142142
struct page *dpage, *spage;
143+
struct nouveau_svmm *svmm;
143144

144145
spage = migrate_pfn_to_page(args->src[0]);
145146
if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
@@ -154,14 +155,19 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
154155
if (dma_mapping_error(dev, *dma_addr))
155156
goto error_free_page;
156157

158+
svmm = spage->zone_device_data;
159+
mutex_lock(&svmm->mutex);
160+
nouveau_svmm_invalidate(svmm, args->start, args->end);
157161
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
158162
NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
159163
goto error_dma_unmap;
164+
mutex_unlock(&svmm->mutex);
160165

161166
args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
162167
return 0;
163168

164169
error_dma_unmap:
170+
mutex_unlock(&svmm->mutex);
165171
dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
166172
error_free_page:
167173
__free_page(dpage);
@@ -182,7 +188,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
182188
.end = vmf->address + PAGE_SIZE,
183189
.src = &src,
184190
.dst = &dst,
185-
.src_owner = drm->dev,
191+
.pgmap_owner = drm->dev,
192+
.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
186193
};
187194

188195
/*
@@ -530,7 +537,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
530537
}
531538

532539
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
533-
unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
540+
struct nouveau_svmm *svmm, unsigned long src,
541+
dma_addr_t *dma_addr, u64 *pfn)
534542
{
535543
struct device *dev = drm->dev->dev;
536544
struct page *dpage, *spage;
@@ -560,6 +568,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
560568
goto out_free_page;
561569
}
562570

571+
dpage->zone_device_data = svmm;
563572
*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
564573
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
565574
if (src & MIGRATE_PFN_WRITE)
@@ -583,8 +592,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
583592
unsigned long addr = args->start, nr_dma = 0, i;
584593

585594
for (i = 0; addr < args->end; i++) {
586-
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
587-
dma_addrs + nr_dma, pfns + i);
595+
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
596+
args->src[i], dma_addrs + nr_dma, pfns + i);
588597
if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
589598
nr_dma++;
590599
addr += PAGE_SIZE;
@@ -615,6 +624,8 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
615624
struct migrate_vma args = {
616625
.vma = vma,
617626
.start = start,
627+
.pgmap_owner = drm->dev,
628+
.flags = MIGRATE_VMA_SELECT_SYSTEM,
618629
};
619630
unsigned long i;
620631
u64 *pfns;

0 commit comments

Comments (0)