Skip to content

Commit 259e9bd

Browse files
ddmatsu
authored and rleon committed
RDMA/core: Avoid hmm_dma_map_alloc() for virtual DMA devices
Drivers such as rxe, which use virtual DMA, must not call into the DMA mapping core since they lack physical DMA capabilities. Otherwise, a NULL pointer dereference is observed as shown below. This patch ensures the RDMA core handles virtual and physical DMA paths appropriately. This fixes the following kernel oops: BUG: kernel NULL pointer dereference, address: 00000000000002fc #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 1028eb067 P4D 1028eb067 PUD 105da0067 PMD 0 Oops: Oops: 0000 [#1] SMP NOPTI CPU: 3 UID: 1000 PID: 1854 Comm: python3 Tainted: G W 6.15.0-rc1+ #11 PREEMPT(voluntary) Tainted: [W]=WARN Hardware name: Trigkey Key N/Key N, BIOS KEYN101 09/02/2024 RIP: 0010:hmm_dma_map_alloc+0x25/0x100 Code: 90 90 90 90 90 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 49 89 d6 49 c1 e6 0c 41 55 41 54 53 49 39 ce 0f 82 c6 00 00 00 49 89 fc <f6> 87 fc 02 00 00 20 0f 84 af 00 00 00 49 89 f5 48 89 d3 49 89 cf RSP: 0018:ffffd3d3420eb830 EFLAGS: 00010246 RAX: 0000000000001000 RBX: ffff8b727c7f7400 RCX: 0000000000001000 RDX: 0000000000000001 RSI: ffff8b727c7f74b0 RDI: 0000000000000000 RBP: ffffd3d3420eb858 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 00007262a622a000 R14: 0000000000001000 R15: ffff8b727c7f74b0 FS: 00007262a62a1080(0000) GS:ffff8b762ac3e000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000002fc CR3: 000000010a1f0004 CR4: 0000000000f72ef0 PKRU: 55555554 Call Trace: <TASK> ib_init_umem_odp+0xb6/0x110 [ib_uverbs] ib_umem_odp_get+0xf0/0x150 [ib_uverbs] rxe_odp_mr_init_user+0x71/0x170 [rdma_rxe] rxe_reg_user_mr+0x217/0x2e0 [rdma_rxe] ib_uverbs_reg_mr+0x19e/0x2e0 [ib_uverbs] ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0xd9/0x150 [ib_uverbs] ib_uverbs_cmd_verbs+0xd19/0xee0 [ib_uverbs] ? mmap_region+0x63/0xd0 ? 
__pfx_ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0x10/0x10 [ib_uverbs] ib_uverbs_ioctl+0xba/0x130 [ib_uverbs] __x64_sys_ioctl+0xa4/0xe0 x64_sys_call+0x1178/0x2660 do_syscall_64+0x7e/0x170 ? syscall_exit_to_user_mode+0x4e/0x250 ? do_syscall_64+0x8a/0x170 ? do_syscall_64+0x8a/0x170 ? syscall_exit_to_user_mode+0x4e/0x250 ? do_syscall_64+0x8a/0x170 ? syscall_exit_to_user_mode+0x4e/0x250 ? do_syscall_64+0x8a/0x170 ? do_user_addr_fault+0x1d2/0x8d0 ? irqentry_exit_to_user_mode+0x43/0x250 ? irqentry_exit+0x43/0x50 ? exc_page_fault+0x93/0x1d0 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7262a6124ded Code: 04 25 28 00 00 00 48 89 45 c8 31 c0 48 8d 45 10 c7 45 b0 10 00 00 00 48 89 45 b8 48 8d 45 d0 48 89 45 c0 b8 10 00 00 00 0f 05 <89> c2 3d 00 f0 ff ff 77 1a 48 8b 45 c8 64 48 2b 04 25 28 00 00 00 RSP: 002b:00007fffd08c3960 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00007fffd08c39f0 RCX: 00007262a6124ded RDX: 00007fffd08c3a10 RSI: 00000000c0181b01 RDI: 0000000000000007 RBP: 00007fffd08c39b0 R08: 0000000014107820 R09: 00007fffd08c3b44 R10: 000000000000000c R11: 0000000000000246 R12: 00007fffd08c3b44 R13: 000000000000000c R14: 00007fffd08c3b58 R15: 0000000014107960 </TASK> Fixes: 1efe8c0 ("RDMA/core: Convert UMEM ODP DMA mapping to caching IOVA and page linkage") Closes: https://lore.kernel.org/all/[email protected]/ Signed-off-by: Daisuke Matsuda <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 0b261d7 commit 259e9bd

File tree

2 files changed

+26
-8
lines changed

2 files changed

+26
-8
lines changed

drivers/infiniband/core/umem_odp.c

Lines changed: 25 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -60,9 +60,11 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
6060
{
6161
struct ib_device *dev = umem_odp->umem.ibdev;
6262
size_t page_size = 1UL << umem_odp->page_shift;
63+
struct hmm_dma_map *map;
6364
unsigned long start;
6465
unsigned long end;
65-
int ret;
66+
size_t nr_entries;
67+
int ret = 0;
6668

6769
umem_odp->umem.is_odp = 1;
6870
mutex_init(&umem_odp->umem_mutex);
@@ -75,9 +77,20 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
7577
if (unlikely(end < page_size))
7678
return -EOVERFLOW;
7779

78-
ret = hmm_dma_map_alloc(dev->dma_device, &umem_odp->map,
79-
(end - start) >> PAGE_SHIFT,
80-
1 << umem_odp->page_shift);
80+
nr_entries = (end - start) >> PAGE_SHIFT;
81+
if (!(nr_entries * PAGE_SIZE / page_size))
82+
return -EINVAL;
83+
84+
map = &umem_odp->map;
85+
if (ib_uses_virt_dma(dev)) {
86+
map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
87+
GFP_KERNEL | __GFP_NOWARN);
88+
if (!map->pfn_list)
89+
ret = -ENOMEM;
90+
} else
91+
ret = hmm_dma_map_alloc(dev->dma_device, map,
92+
(end - start) >> PAGE_SHIFT,
93+
1 << umem_odp->page_shift);
8194
if (ret)
8295
return ret;
8396

@@ -90,7 +103,10 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
90103
return 0;
91104

92105
out_free_map:
93-
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
106+
if (ib_uses_virt_dma(dev))
107+
kfree(map->pfn_list);
108+
else
109+
hmm_dma_map_free(dev->dma_device, map);
94110
return ret;
95111
}
96112

@@ -259,7 +275,10 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
259275
ib_umem_end(umem_odp));
260276
mutex_unlock(&umem_odp->umem_mutex);
261277
mmu_interval_notifier_remove(&umem_odp->notifier);
262-
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
278+
if (ib_uses_virt_dma(dev))
279+
kfree(umem_odp->map.pfn_list);
280+
else
281+
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
263282
}
264283

265284
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)

mm/hmm.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -640,8 +640,7 @@ int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
640640
bool dma_need_sync = false;
641641
bool use_iova;
642642

643-
if (!(nr_entries * PAGE_SIZE / dma_entry_size))
644-
return -EINVAL;
643+
WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size));
645644

646645
/*
647646
* The HMM API violates our normal DMA buffer ownership rules and can't

0 commit comments

Comments
 (0)