@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
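
For context, the patch replaces arm's open-coded mmap-lock dance (trylock, exception-table check, then __do_page_fault() doing find_vma() and stack expansion) with the generic lock_mm_and_find_vma() helper that the 2023 stack-expansion rework introduced in mm/memory.c. The sketch below illustrates the contract the new caller above relies on; it is a simplified illustration of that contract, not the upstream implementation, and the function name is suffixed _sketch to make that explicit. Error paths such as the killable lock acquisition are condensed.

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/ptrace.h>

/*
 * Simplified sketch of the lock_mm_and_find_vma() contract:
 * on success, returns the VMA covering addr (expanding the stack
 * if needed) with mmap_lock held for read; on failure, returns
 * NULL with mmap_lock NOT held, so the caller can jump straight
 * to its bad_area / no_context handling without unlocking.
 */
static struct vm_area_struct *
lock_mm_and_find_vma_sketch(struct mm_struct *mm, unsigned long addr,
			    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	if (!mmap_read_trylock(mm)) {
		/*
		 * A kernel-mode fault from code with no fixup entry
		 * must not sleep on mmap_lock (it may already hold
		 * it); bail out so the caller can oops cleanly.
		 */
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			return NULL;
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, addr);
	if (!vma)
		goto fail;
	if (vma->vm_start <= addr)
		return vma;	/* addr lies inside an existing VMA */

	/* Gap below a VM_GROWSDOWN mapping: try to expand the stack. */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto fail;
	if (expand_stack(vma, addr))
		goto fail;
	return vma;

fail:
	mmap_read_unlock(mm);
	return NULL;
}

This is why the converted do_page_fault() above can use a bare goto bad_area on a NULL return, and why the patch folds __do_page_fault() away: only the permission check against vm_flags and the handle_mm_fault() call remain architecture-specific, while the locking, VMA lookup, and stack-expansion logic move to the shared helper.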