Skip to content

Commit 369cd21

Browse files
mjkravetz authored and torvalds committed
userfaultfd: hugetlbfs: userfaultfd_huge_must_wait for hugepmd ranges
Add routine userfaultfd_huge_must_wait which has the same functionality as the existing userfaultfd_must_wait routine. Only difference is that new routine must handle page table structure for hugepmd vmas. Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Mike Kravetz <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Cc: "Dr. David Alan Gilbert" <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Michael Rapoport <[email protected]> Cc: Mike Rapoport <[email protected]> Cc: Pavel Emelyanov <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9903bd7 commit 369cd21

File tree

1 file changed

+49
-2
lines changed

1 file changed

+49
-2
lines changed

fs/userfaultfd.c

Lines changed: 49 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -202,6 +202,49 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
202202
return msg;
203203
}
204204

205+
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 *
 * Returns true when the faulting thread should (still) wait: either no
 * huge page table entry has been installed for @address yet, or the
 * condition that triggered the fault (missing entry, or write-protection
 * when @reason includes VM_UFFD_WP) still holds.  @flags is currently
 * unused here, kept for signature parity with userfaultfd_must_wait.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *pte;
	bool ret = true;

	/* The caller must hold mmap_sem while we walk the page table. */
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	/* No huge pte installed for this address yet: keep waiting. */
	pte = huge_pte_offset(mm, address);
	if (!pte)
		goto out;

	ret = false;

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(*pte))
		ret = true;
	/* A still write-protected pte means a WP fault is not resolved yet. */
	if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
/*
 * Stub for !CONFIG_HUGETLB_PAGE builds: hugetlb vmas cannot exist, so
 * handle_userfault() should never reach this path.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */
247+
205248
/*
206249
* Verify the pagetables are still not ok after having reigstered into
207250
* the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
@@ -378,8 +421,12 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
378421
set_current_state(blocking_state);
379422
spin_unlock(&ctx->fault_pending_wqh.lock);
380423

381-
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
382-
reason);
424+
if (!is_vm_hugetlb_page(vmf->vma))
425+
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
426+
reason);
427+
else
428+
must_wait = userfaultfd_huge_must_wait(ctx, vmf->address,
429+
vmf->flags, reason);
383430
up_read(&mm->mmap_sem);
384431

385432
if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&

0 commit comments

Comments
 (0)