@@ -648,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -672,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
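
With the key-assignment side effect removed, look_up_lock_class() gains a two-state contract: it returns a registered class or NULL, never an ERR_PTR(). A condensed before/after sketch of what this means for a caller (illustrative, not verbatim kernel code):

	/* Before: three-way result, callers needed the ERR_PTR() machinery */
	class = look_up_lock_class(lock, subclass);
	if (IS_ERR(class))	/* non-static key: hard error, lockdep off */
		return NULL;
	if (!class)		/* not hashed yet: fall through and register */
		...

	/* After: NULL simply means "not in the hash table yet" */
	class = look_up_lock_class(lock, subclass);
	if (!class)		/* not hashed yet: fall through and register */
		...
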
@@ -721,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
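
The pr_err() text above ("needs lockdep annotation") is what a developer sees when a lock's key lives in non-persistent storage. A hypothetical sketch of the usual fix for a lock embedded in a kmalloc()ed object, using the standard lockdep_set_class() annotation (struct foo, foo_lock_key and foo_alloc() are invented names for illustration):

	/* one persistent key, shared by every lock of this class */
	static struct lock_class_key foo_lock_key;

	struct foo *foo_alloc(void)
	{
		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		spin_lock_init(&foo->lock);
		/* hand the dynamically allocated lock a static key */
		lockdep_set_class(&foo->lock, &foo_lock_key);
		return foo;
	}
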
@@ -739,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
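
Note how the rewritten branch pivots on static_obj(): a pre-set key is acceptable only if it points at storage that outlives the lock. For orientation, a simplified paraphrase of what that helper accepts (a sketch only; the real static_obj() in kernel/locking/lockdep.c also handles arch-specific kernel data ranges):

	static int static_obj_sketch(const void *obj)
	{
		unsigned long addr = (unsigned long)obj;

		/* core kernel image, i.e. .data/.bss? */
		if (addr >= (unsigned long)&_stext && addr < (unsigned long)&_end)
			return 1;
		/* in-kernel per-CPU variable? */
		if (is_kernel_percpu_address(addr))
			return 1;
		/* module static or module per-CPU storage? */
		return is_module_address(addr) || is_module_percpu_address(addr);
	}
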
@@ -3273,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3482,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+			   const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3499,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (IS_ERR_OR_NULL(class))
+		if (!class)
 			return 0;
 
 		/*
@@ -3724,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3938,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
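
Constifying the public lock_is_held_type() entry point is what lets the earlier const changes pay off: assertion helpers become usable from read-only code paths. A hypothetical illustration (struct foo and foo_read_counter() are invented; lockdep_assert_held() expands to a lockdep_is_held() check, which now accepts a pointer into a const object):

	static u64 foo_read_counter(const struct foo *foo)
	{
		/* compiles even though &foo->lock is a pointer to const */
		lockdep_assert_held(&foo->lock);
		return foo->counter;
	}
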
@@ -4295,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*