Skip to content

Commit 0b261d7

Browse files
committed
RDMA/rxe: Break endless pagefault loop for RO pages
RO pages have "perm" equal to 0, which caused a situation where such pages were always marked as needing a page fault, resulting in an infinite loop. Fixes: eedd5b1 ("RDMA/umem: Store ODP access mask information in PFN") Reported-by: Daisuke Matsuda <[email protected]> Closes: https://lore.kernel.org/all/[email protected]/ Link: https://patch.msgid.link/096fab178d48ed86942ee22eafe9be98e29092aa.1747913377.git.leonro@nvidia.com Tested-by: Daisuke Matsuda <[email protected]> Signed-off-by: Leon Romanovsky <[email protected]>
1 parent 990b5c0 commit 0b261d7

File tree

1 file changed

+5
-9
lines changed

1 file changed

+5
-9
lines changed

drivers/infiniband/sw/rxe/rxe_odp.c

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
124124
return err;
125125
}
126126

127-
static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
128-
u64 iova, int length, u32 perm)
127+
static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
128+
int length)
129129
{
130130
bool need_fault = false;
131131
u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
137137
while (addr < iova + length) {
138138
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
139139

140-
if (!(umem_odp->map.pfn_list[idx] & perm)) {
140+
if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
141141
need_fault = true;
142142
break;
143143
}
@@ -161,18 +161,14 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
161161
{
162162
struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
163163
bool need_fault;
164-
u64 perm = 0;
165164
int err;
166165

167166
if (unlikely(length < 1))
168167
return -EINVAL;
169168

170-
if (!(flags & RXE_PAGEFAULT_RDONLY))
171-
perm |= HMM_PFN_WRITE;
172-
173169
mutex_lock(&umem_odp->umem_mutex);
174170

175-
need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
171+
need_fault = rxe_check_pagefault(umem_odp, iova, length);
176172
if (need_fault) {
177173
mutex_unlock(&umem_odp->umem_mutex);
178174

@@ -182,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
182178
if (err < 0)
183179
return err;
184180

185-
need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
181+
need_fault = rxe_check_pagefault(umem_odp, iova, length);
186182
if (need_fault)
187183
return -EFAULT;
188184
}

0 commit comments

Comments
 (0)