@@ -296,30 +296,18 @@ EXPORT_SYMBOL(ib_umem_odp_release);
 static int ib_umem_odp_map_dma_single_page(
                 struct ib_umem_odp *umem_odp,
                 unsigned int dma_index,
-                struct page *page,
-                u64 access_mask)
+                struct page *page)
 {
         struct ib_device *dev = umem_odp->umem.ibdev;
         dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
 
-        if (*dma_addr) {
-                /*
-                 * If the page is already dma mapped it means it went through
-                 * a non-invalidating trasition, like read-only to writable.
-                 * Resync the flags.
-                 */
-                *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
-                return 0;
-        }
-
         *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
                                     DMA_BIDIRECTIONAL);
         if (ib_dma_mapping_error(dev, *dma_addr)) {
                 *dma_addr = 0;
                 return -EFAULT;
         }
         umem_odp->npages++;
-        *dma_addr |= access_mask;
         return 0;
 }
 
@@ -355,9 +343,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         struct hmm_range range = {};
         unsigned long timeout;
 
-        if (access_mask == 0)
-                return -EINVAL;
-
         if (user_virt < ib_umem_start(umem_odp) ||
             user_virt + bcnt > ib_umem_end(umem_odp))
                 return -EFAULT;
@@ -383,7 +368,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         if (fault) {
                 range.default_flags = HMM_PFN_REQ_FAULT;
 
-                if (access_mask & ODP_WRITE_ALLOWED_BIT)
+                if (access_mask & HMM_PFN_WRITE)
                         range.default_flags |= HMM_PFN_REQ_WRITE;
         }
 
@@ -415,22 +400,17 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         for (pfn_index = 0; pfn_index < num_pfns;
                 pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
 
-                if (fault) {
-                        /*
-                         * Since we asked for hmm_range_fault() to populate
-                         * pages it shouldn't return an error entry on success.
-                         */
-                        WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
-                        WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
-                } else {
-                        if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
-                                WARN_ON(umem_odp->dma_list[dma_index]);
-                                continue;
-                        }
-                        access_mask = ODP_READ_ALLOWED_BIT;
-                        if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
-                                access_mask |= ODP_WRITE_ALLOWED_BIT;
-                }
+                /*
+                 * Since we asked for hmm_range_fault() to populate
+                 * pages it shouldn't return an error entry on success.
+                 */
+                WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+                WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+                if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+                        continue;
+
+                if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+                        continue;
 
                 hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
                 /* If a hugepage was detected and ODP wasn't set for, the umem
@@ -445,13 +425,14 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
                 }
 
                 ret = ib_umem_odp_map_dma_single_page(
-                                umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
-                                access_mask);
+                                umem_odp, dma_index,
+                                hmm_pfn_to_page(range.hmm_pfns[pfn_index]));
                 if (ret < 0) {
                         ibdev_dbg(umem_odp->umem.ibdev,
                                   "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                         break;
                 }
+                range.hmm_pfns[pfn_index] |= HMM_PFN_DMA_MAPPED;
         }
         /* upon success lock should stay on hold for the callee */
         if (!ret)
@@ -471,7 +452,6 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                  u64 bound)
 {
-        dma_addr_t dma_addr;
         dma_addr_t dma;
         int idx;
         u64 addr;
@@ -482,34 +462,37 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
         virt = max_t(u64, virt, ib_umem_start(umem_odp));
         bound = min_t(u64, bound, ib_umem_end(umem_odp));
         for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
+                unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >>
+                                        PAGE_SHIFT;
+                struct page *page =
+                        hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
+
                 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                 dma = umem_odp->dma_list[idx];
 
-                /* The access flags guaranteed a valid DMA address in case was NULL */
-                if (dma) {
-                        unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
-                        struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
-                        dma_addr = dma & ODP_DMA_ADDR_MASK;
-                        ib_dma_unmap_page(dev, dma_addr,
-                                          BIT(umem_odp->page_shift),
-                                          DMA_BIDIRECTIONAL);
-                        if (dma & ODP_WRITE_ALLOWED_BIT) {
-                                struct page *head_page = compound_head(page);
-                                /*
-                                 * set_page_dirty prefers being called with
-                                 * the page lock. However, MMU notifiers are
-                                 * called sometimes with and sometimes without
-                                 * the lock. We rely on the umem_mutex instead
-                                 * to prevent other mmu notifiers from
-                                 * continuing and allowing the page mapping to
-                                 * be removed.
-                                 */
-                                set_page_dirty(head_page);
-                        }
-                        umem_odp->dma_list[idx] = 0;
-                        umem_odp->npages--;
+                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_VALID))
+                        goto clear;
+                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_DMA_MAPPED))
+                        goto clear;
+
+                ib_dma_unmap_page(dev, dma, BIT(umem_odp->page_shift),
+                                  DMA_BIDIRECTIONAL);
+                if (umem_odp->pfn_list[pfn_idx] & HMM_PFN_WRITE) {
+                        struct page *head_page = compound_head(page);
+                        /*
+                         * set_page_dirty prefers being called with
+                         * the page lock. However, MMU notifiers are
+                         * called sometimes with and sometimes without
+                         * the lock. We rely on the umem_mutex instead
+                         * to prevent other mmu notifiers from
+                         * continuing and allowing the page mapping to
+                         * be removed.
+                         */
+                        set_page_dirty(head_page);
                 }
+                umem_odp->npages--;
+clear:
+                umem_odp->pfn_list[pfn_idx] &= ~HMM_PFN_FLAGS;
         }
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
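A minimal illustrative sketch, not part of the diff above: after this change, per-page access and mapping state is read from the HMM PFN flags kept in umem_odp->pfn_list[] rather than from ODP_*_ALLOWED bits packed into dma_list[] entries. The helper name below is hypothetical and shown only to make the new flag layout concrete.

        /*
         * Sketch only (hypothetical helper, not upstream code): check whether a
         * given ODP page is currently usable for write access by consulting the
         * HMM PFN flags this patch now relies on.
         */
        static bool ib_umem_odp_pfn_writable(struct ib_umem_odp *umem_odp,
                                             unsigned long pfn_idx)
        {
                unsigned long pfn = umem_odp->pfn_list[pfn_idx];

                /* A PFN must be valid and DMA mapped before its write bit matters. */
                return (pfn & HMM_PFN_VALID) && (pfn & HMM_PFN_DMA_MAPPED) &&
                       (pfn & HMM_PFN_WRITE);
        }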