@@ -655,62 +655,29 @@ static void set_signal_archinfo(unsigned long address,
 }
 
 static noinline void
-no_context(struct pt_regs *regs, unsigned long error_code,
-	   unsigned long address, int signal, int si_code)
+page_fault_oops(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address)
 {
-	struct task_struct *tsk = current;
 	unsigned long flags;
 	int sig;
 
 	if (user_mode(regs)) {
 		/*
-		 * This is an implicit supervisor-mode access from user
-		 * mode.  Bypass all the kernel-mode recovery code and just
-		 * OOPS.
+		 * Implicit kernel access from user mode?  Skip the stack
+		 * overflow and EFI special cases.
 		 */
 		goto oops;
 	}
 
-	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
-		/*
-		 * Any interrupt that takes a fault gets the fixup. This makes
-		 * the below recursive fault logic only apply to a faults from
-		 * task context.
-		 */
-		if (in_interrupt())
-			return;
-
-		/*
-		 * Per the above we're !in_interrupt(), aka. task context.
-		 *
-		 * In this case we need to make sure we're not recursively
-		 * faulting through the emulate_vsyscall() logic.
-		 */
-		if (current->thread.sig_on_uaccess_err && signal) {
-			sanitize_error_code(address, &error_code);
-
-			set_signal_archinfo(address, error_code);
-
-			/* XXX: hwpoison faults will set the wrong code. */
-			force_sig_fault(signal, si_code, (void __user *)address);
-		}
-
-		/*
-		 * Barring that, we can do the fixup and be happy.
-		 */
-		return;
-	}
-
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Stack overflow?  During boot, we can fault near the initial
 	 * stack in the direct map, but that's not an overflow -- check
 	 * that we're in vmalloc space to avoid this.
 	 */
 	if (is_vmalloc_addr((void *)address) &&
-	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
-	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
+	    (((unsigned long)current->stack - 1 - address < PAGE_SIZE) ||
+	     address - ((unsigned long)current->stack + THREAD_SIZE) < PAGE_SIZE)) {
 		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
 		/*
 		 * We're likely to be running with very little stack space
@@ -733,20 +700,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	}
 #endif
 
-	/*
-	 * 32-bit:
-	 *
-	 *   Valid to do another page fault here, because if this fault
-	 *   had been triggered by is_prefetch fixup_exception would have
-	 *   handled it.
-	 *
-	 * 64-bit:
-	 *
-	 *   Hall of shame of CPU/BIOS bugs.
-	 */
-	if (is_prefetch(regs, error_code, address))
-		return;
-
 	/*
 	 * Buggy firmware could access regions which might page fault, try to
 	 * recover from such faults.
@@ -763,7 +716,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 
 	show_fault_oops(regs, error_code, address);
 
-	if (task_stack_end_corrupted(tsk))
+	if (task_stack_end_corrupted(current))
 		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
 	sig = SIGKILL;
@@ -776,6 +729,61 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 	oops_end(flags, regs, sig);
 }
 
+static noinline void
+no_context(struct pt_regs *regs, unsigned long error_code,
+	   unsigned long address, int signal, int si_code)
+{
+	if (user_mode(regs)) {
+		/*
+		 * This is an implicit supervisor-mode access from user
+		 * mode.  Bypass all the kernel-mode recovery code and just
+		 * OOPS.
+		 */
+		goto oops;
+	}
+
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
+		/*
+		 * Any interrupt that takes a fault gets the fixup. This makes
+		 * the below recursive fault logic only apply to a faults from
+		 * task context.
+		 */
+		if (in_interrupt())
+			return;
+
+		/*
+		 * Per the above we're !in_interrupt(), aka. task context.
+		 *
+		 * In this case we need to make sure we're not recursively
+		 * faulting through the emulate_vsyscall() logic.
+		 */
+		if (current->thread.sig_on_uaccess_err && signal) {
+			sanitize_error_code(address, &error_code);
+
+			set_signal_archinfo(address, error_code);
+
+			/* XXX: hwpoison faults will set the wrong code. */
+			force_sig_fault(signal, si_code, (void __user *)address);
+		}
+
+		/*
+		 * Barring that, we can do the fixup and be happy.
+		 */
+		return;
+	}
+
+	/*
+	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
+	 * instruction.
+	 */
+	if (is_prefetch(regs, error_code, address))
+		return;
+
+oops:
+	page_fault_oops(regs, error_code, address);
+}
+
 /*
  * Print out info about fatal segfaults, if the show_unhandled_signals
  * sysctl is set: