Skip to content

Commit 8b35ca3

Browse files
bwhacks authored and torvalds committed
arm/mm: Convert to using lock_mm_and_find_vma()
arm has an additional check for address < FIRST_USER_ADDRESS before expanding the stack. Since FIRST_USER_ADDRESS is defined everywhere (generally as 0), move that check to the generic expand_downwards(). Signed-off-by: Ben Hutchings <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 7267ef7 commit 8b35ca3

File tree

3 files changed

+16
-50
lines changed

3 files changed

+16
-50
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,7 @@ config ARM
125125
select HAVE_UID16
126126
select HAVE_VIRT_CPU_ACCOUNTING_GEN
127127
select IRQ_FORCED_THREADING
128+
select LOCK_MM_AND_FIND_VMA
128129
select MODULES_USE_ELF_REL
129130
select NEED_DMA_MAP_STATE
130131
select OF_EARLY_FLATTREE if OF

arch/arm/mm/fault.c

Lines changed: 14 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
232232
return false;
233233
}
234234

235-
static vm_fault_t __kprobes
236-
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
237-
unsigned long vma_flags, struct pt_regs *regs)
238-
{
239-
struct vm_area_struct *vma = find_vma(mm, addr);
240-
if (unlikely(!vma))
241-
return VM_FAULT_BADMAP;
242-
243-
if (unlikely(vma->vm_start > addr)) {
244-
if (!(vma->vm_flags & VM_GROWSDOWN))
245-
return VM_FAULT_BADMAP;
246-
if (addr < FIRST_USER_ADDRESS)
247-
return VM_FAULT_BADMAP;
248-
if (expand_stack(vma, addr))
249-
return VM_FAULT_BADMAP;
250-
}
251-
252-
/*
253-
* ok, we have a good vm_area for this memory access, check the
254-
* permissions on the VMA allow for the fault which occurred.
255-
*/
256-
if (!(vma->vm_flags & vma_flags))
257-
return VM_FAULT_BADACCESS;
258-
259-
return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
260-
}
261-
262235
static int __kprobes
263236
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
264237
{
265238
struct mm_struct *mm = current->mm;
239+
struct vm_area_struct *vma;
266240
int sig, code;
267241
vm_fault_t fault;
268242
unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
301275

302276
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
303277

304-
/*
305-
* As per x86, we may deadlock here. However, since the kernel only
306-
* validly references user space from well defined areas of the code,
307-
* we can bug out early if this is from code which shouldn't.
308-
*/
309-
if (!mmap_read_trylock(mm)) {
310-
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
311-
goto no_context;
312278
retry:
313-
mmap_read_lock(mm);
314-
} else {
315-
/*
316-
* The above down_read_trylock() might have succeeded in
317-
* which case, we'll have missed the might_sleep() from
318-
* down_read()
319-
*/
320-
might_sleep();
321-
#ifdef CONFIG_DEBUG_VM
322-
if (!user_mode(regs) &&
323-
!search_exception_tables(regs->ARM_pc))
324-
goto no_context;
325-
#endif
279+
vma = lock_mm_and_find_vma(mm, addr, regs);
280+
if (unlikely(!vma)) {
281+
fault = VM_FAULT_BADMAP;
282+
goto bad_area;
326283
}
327284

328-
fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
285+
/*
286+
* ok, we have a good vm_area for this memory access, check the
287+
* permissions on the VMA allow for the fault which occurred.
288+
*/
289+
if (!(vma->vm_flags & vm_flags))
290+
fault = VM_FAULT_BADACCESS;
291+
else
292+
fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
329293

330294
/* If we need to retry but a fatal signal is pending, handle the
331295
* signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
356320
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
357321
return 0;
358322

323+
bad_area:
359324
/*
360325
* If we are in kernel mode at this point, we
361326
* have no context to handle this fault with.

mm/mmap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2036,7 +2036,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
20362036
int error = 0;
20372037

20382038
address &= PAGE_MASK;
2039-
if (address < mmap_min_addr)
2039+
if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
20402040
return -EPERM;
20412041

20422042
/* Enforce stack_guard_gap */

0 commit comments

Comments (0)