@@ -395,7 +395,7 @@ void lockdep_init_task(struct task_struct *task)
 
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE(--current->lockdep_recursion))
+	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
 		current->lockdep_recursion = 0;
 }
 
@@ -3646,7 +3646,16 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs do not (and cannot) track lock dependencies, nothing to do.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (unlikely(current->hardirqs_enabled)) {
@@ -3692,7 +3701,27 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+	 * tracking state and hardware state are out of sync.
+	 *
+	 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+	 * and not rely on hardware state like normal interrupts.
+	 */
+	if (unlikely(in_nmi())) {
+		/*
+		 * Skip:
+		 *  - recursion check, because NMI can hit lockdep;
+		 *  - hardware state check, because above;
+		 *  - chain_key check, see lockdep_hardirqs_on_prepare().
+		 */
+		goto skip_checks;
+	}
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (curr->hardirqs_enabled) {
@@ -3720,6 +3749,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
 			    current->curr_chain_key);
 
+skip_checks:
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 	curr->hardirq_enable_ip = ip;
@@ -3735,7 +3765,15 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+	 * they will restore the software state. This ensures the software
+	 * state is consistent inside NMIs as well.
+	 */
+	if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
 		return;
 
 	/*
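
Note on the new "& LOCKDEP_RECURSION_MASK" tests: the change relies on the per-task lockdep_recursion counter being split so that the low bits count genuine lockdep recursion while the bits above the mask encode a lockdep_off()-style "disabled" state. The following is a minimal, standalone sketch of that split; the 16-bit layout and the sketch_* helper names are illustrative assumptions and are not taken from this diff.

#include <stdio.h>

/*
 * Sketch only: split a recursion counter so the low bits count recursion
 * and anything above LOCKDEP_RECURSION_MASK means "lockdep is turned off".
 * The 16-bit split is assumed for illustration.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

static unsigned int lockdep_recursion;	/* stands in for current->lockdep_recursion */

static void sketch_lockdep_off(void) { lockdep_recursion += LOCKDEP_OFF; }
static void sketch_lockdep_on(void)  { lockdep_recursion -= LOCKDEP_OFF; }

int main(void)
{
	sketch_lockdep_off();
	/* "off" does not look like recursion: the masked value stays 0 */
	printf("off, masked recursion = %u\n",
	       lockdep_recursion & LOCKDEP_RECURSION_MASK);

	lockdep_recursion++;	/* a genuine recursive entry */
	printf("recursing, masked recursion = %u\n",
	       lockdep_recursion & LOCKDEP_RECURSION_MASK);

	lockdep_recursion--;
	sketch_lockdep_on();
	return 0;
}

With such a layout, masking with LOCKDEP_RECURSION_MASK isolates the recursion count, so a counter that is merely "off" does not trip the recursion checks added above.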