@@ -124,8 +124,8 @@ int rxe_odp_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
 	return err;
 }
 
-static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
-				       u64 iova, int length, u32 perm)
+static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp, u64 iova,
+				       int length)
 {
 	bool need_fault = false;
 	u64 addr;
@@ -137,7 +137,7 @@ static inline bool rxe_check_pagefault(struct ib_umem_odp *umem_odp,
 	while (addr < iova + length) {
 		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
 
-		if (!(umem_odp->map.pfn_list[idx] & perm)) {
+		if (!(umem_odp->map.pfn_list[idx] & HMM_PFN_VALID)) {
 			need_fault = true;
 			break;
 		}
@@ -161,18 +161,14 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
 {
 	struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
 	bool need_fault;
-	u64 perm = 0;
 	int err;
 
 	if (unlikely(length < 1))
 		return -EINVAL;
 
-	if (!(flags & RXE_PAGEFAULT_RDONLY))
-		perm |= HMM_PFN_WRITE;
-
 	mutex_lock(&umem_odp->umem_mutex);
 
-	need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+	need_fault = rxe_check_pagefault(umem_odp, iova, length);
 	if (need_fault) {
 		mutex_unlock(&umem_odp->umem_mutex);
 
@@ -182,7 +178,7 @@ static int rxe_odp_map_range_and_lock(struct rxe_mr *mr, u64 iova, int length, u
 		if (err < 0)
 			return err;
 
-		need_fault = rxe_check_pagefault(umem_odp, iova, length, perm);
+		need_fault = rxe_check_pagefault(umem_odp, iova, length);
 		if (need_fault)
 			return -EFAULT;
 	}