Commit 859d069

Author: Peter Zijlstra

lockdep: Prepare for NMI IRQ state tracking

There is no reason not to always, accurately, track IRQ state.

This change also makes IRQ state tracking ignore lockdep_off().

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Ingo Molnar <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 248591f commit 859d069
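
The masked recursion check used throughout this patch is what lets IRQ state tracking ignore lockdep_off(): only the low bits of lockdep_recursion count genuine recursion, while lockdep_off() is presumed to bias the counter above LOCKDEP_RECURSION_MASK. Below is a minimal, standalone sketch of that counter split; the bit width, the lockdep_off()/lockdep_on() bodies, and the plain global standing in for current->lockdep_recursion are illustrative assumptions, not this commit's code.

/*
 * Standalone sketch (userspace, not kernel code) of the assumed counter
 * layout: low bits count recursion, lockdep_off() adds a bias above the
 * mask, so a masked check still permits IRQ state tracking while lockdep
 * proper is "off".
 */
#include <assert.h>
#include <stdio.h>

#define LOCKDEP_RECURSION_BITS	16			/* illustrative width */
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

static unsigned int lockdep_recursion;	/* stands in for current->lockdep_recursion */

static void lockdep_off(void) { lockdep_recursion += LOCKDEP_OFF; }	/* assumed bias */
static void lockdep_on(void)  { lockdep_recursion -= LOCKDEP_OFF; }

/* The check the patch switches to: only the recursion bits matter here. */
static unsigned int irq_tracking_blocked(void)
{
	return lockdep_recursion & LOCKDEP_RECURSION_MASK;
}

int main(void)
{
	lockdep_off();				/* the old unmasked check would bail out here */
	assert(!irq_tracking_blocked());	/* masked check still allows IRQ tracking */
	lockdep_on();

	lockdep_recursion++;			/* genuine lockdep recursion ... */
	assert(irq_tracking_blocked());		/* ... is still caught by the mask */
	lockdep_recursion--;

	puts("mask split behaves as expected");
	return 0;
}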

1 file changed: 42 additions and 4 deletions

kernel/locking/lockdep.c

@@ -395,7 +395,7 @@ void lockdep_init_task(struct task_struct *task)
 
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE(--current->lockdep_recursion))
+	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
 		current->lockdep_recursion = 0;
 }
 
@@ -3646,7 +3646,16 @@ static void __trace_hardirqs_on_caller(void)
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs do not (and cannot) track lock dependencies, nothing to do.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (unlikely(current->hardirqs_enabled)) {
@@ -3692,7 +3701,27 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
+	 * tracking state and hardware state are out of sync.
+	 *
+	 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
+	 * and not rely on hardware state like normal interrupts.
+	 */
+	if (unlikely(in_nmi())) {
+		/*
+		 * Skip:
+		 *  - recursion check, because NMI can hit lockdep;
+		 *  - hardware state check, because above;
+		 *  - chain_key check, see lockdep_hardirqs_on_prepare().
+		 */
+		goto skip_checks;
+	}
+
+	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
 		return;
 
 	if (curr->hardirqs_enabled) {
@@ -3720,6 +3749,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 	DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
 			    current->curr_chain_key);
 
+skip_checks:
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 	curr->hardirq_enable_ip = ip;
@@ -3735,7 +3765,15 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks || curr->lockdep_recursion))
+	if (unlikely(!debug_locks))
+		return;
+
+	/*
+	 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
+	 * they will restore the software state. This ensures the software
+	 * state is consistent inside NMIs as well.
+	 */
+	if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
 		return;
 
 	/*