Skip to content

Commit 5e7481a

Browse files
committed
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes relate to making lock_is_held() et al (and external
  wrappers of them) work on const data types - this requires const
  propagation through the depths of lockdep.

  This removes a number of ugly type hacks the external helpers used"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Convert some users to const
  lockdep: Make lockdep checking constant
  lockdep: Assign lock keys on registration
2 parents b8dbf73 + 05b9380 commit 5e7481a

File tree

5 files changed

+53
-50
lines changed

5 files changed

+53
-50
lines changed

include/linux/backing-dev.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
332332
* holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
333333
* associated wb's list_lock.
334334
*/
335-
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
335+
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
336336
{
337337
#ifdef CONFIG_LOCKDEP
338338
WARN_ON_ONCE(debug_locks &&

include/linux/lockdep.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
337337
/*
338338
* Same "read" as for lock_acquire(), except -1 means any.
339339
*/
340-
extern int lock_is_held_type(struct lockdep_map *lock, int read);
340+
extern int lock_is_held_type(const struct lockdep_map *lock, int read);
341341

342-
static inline int lock_is_held(struct lockdep_map *lock)
342+
static inline int lock_is_held(const struct lockdep_map *lock)
343343
{
344344
return lock_is_held_type(lock, -1);
345345
}

include/linux/srcu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
9292
* relies on normal RCU, it can be called from the CPU which
9393
* is in the idle loop from an RCU point of view or offline.
9494
*/
95-
static inline int srcu_read_lock_held(struct srcu_struct *sp)
95+
static inline int srcu_read_lock_held(const struct srcu_struct *sp)
9696
{
9797
if (!debug_lockdep_rcu_enabled())
9898
return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
101101

102102
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
103103

104-
static inline int srcu_read_lock_held(struct srcu_struct *sp)
104+
static inline int srcu_read_lock_held(const struct srcu_struct *sp)
105105
{
106106
return 1;
107107
}

include/net/sock.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1445,10 +1445,8 @@ do { \
14451445
} while (0)
14461446

14471447
#ifdef CONFIG_LOCKDEP
1448-
static inline bool lockdep_sock_is_held(const struct sock *csk)
1448+
static inline bool lockdep_sock_is_held(const struct sock *sk)
14491449
{
1450-
struct sock *sk = (struct sock *)csk;
1451-
14521450
return lockdep_is_held(&sk->sk_lock) ||
14531451
lockdep_is_held(&sk->sk_lock.slock);
14541452
}

kernel/locking/lockdep.c

Lines changed: 47 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -648,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
648648
return count + 1;
649649
}
650650

651-
/*
652-
* Register a lock's class in the hash-table, if the class is not present
653-
* yet. Otherwise we look it up. We cache the result in the lock object
654-
* itself, so actual lookup of the hash should be once per lock object.
655-
*/
656651
static inline struct lock_class *
657-
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
652+
look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
658653
{
659654
struct lockdep_subclass_key *key;
660655
struct hlist_head *hash_head;
661656
struct lock_class *class;
662-
bool is_static = false;
663657

664658
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
665659
debug_locks_off();
@@ -672,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
672666
}
673667

674668
/*
675-
* Static locks do not have their class-keys yet - for them the key
676-
* is the lock object itself. If the lock is in the per cpu area,
677-
* the canonical address of the lock (per cpu offset removed) is
678-
* used.
669+
* If it is not initialised then it has never been locked,
670+
* so it won't be present in the hash table.
679671
*/
680-
if (unlikely(!lock->key)) {
681-
unsigned long can_addr, addr = (unsigned long)lock;
682-
683-
if (__is_kernel_percpu_address(addr, &can_addr))
684-
lock->key = (void *)can_addr;
685-
else if (__is_module_percpu_address(addr, &can_addr))
686-
lock->key = (void *)can_addr;
687-
else if (static_obj(lock))
688-
lock->key = (void *)lock;
689-
else
690-
return ERR_PTR(-EINVAL);
691-
is_static = true;
692-
}
672+
if (unlikely(!lock->key))
673+
return NULL;
693674

694675
/*
695676
* NOTE: the class-key must be unique. For dynamic locks, a static
@@ -721,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
721702
}
722703
}
723704

724-
return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
705+
return NULL;
706+
}
707+
708+
/*
709+
* Static locks do not have their class-keys yet - for them the key is
710+
* the lock object itself. If the lock is in the per cpu area, the
711+
* canonical address of the lock (per cpu offset removed) is used.
712+
*/
713+
static bool assign_lock_key(struct lockdep_map *lock)
714+
{
715+
unsigned long can_addr, addr = (unsigned long)lock;
716+
717+
if (__is_kernel_percpu_address(addr, &can_addr))
718+
lock->key = (void *)can_addr;
719+
else if (__is_module_percpu_address(addr, &can_addr))
720+
lock->key = (void *)can_addr;
721+
else if (static_obj(lock))
722+
lock->key = (void *)lock;
723+
else {
724+
/* Debug-check: all keys must be persistent! */
725+
debug_locks_off();
726+
pr_err("INFO: trying to register non-static key.\n");
727+
pr_err("the code is fine but needs lockdep annotation.\n");
728+
pr_err("turning off the locking correctness validator.\n");
729+
dump_stack();
730+
return false;
731+
}
732+
733+
return true;
725734
}
726735

727736
/*
@@ -739,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
739748
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
740749

741750
class = look_up_lock_class(lock, subclass);
742-
if (likely(!IS_ERR_OR_NULL(class)))
751+
if (likely(class))
743752
goto out_set_class_cache;
744753

745-
/*
746-
* Debug-check: all keys must be persistent!
747-
*/
748-
if (IS_ERR(class)) {
749-
debug_locks_off();
750-
printk("INFO: trying to register non-static key.\n");
751-
printk("the code is fine but needs lockdep annotation.\n");
752-
printk("turning off the locking correctness validator.\n");
753-
dump_stack();
754+
if (!lock->key) {
755+
if (!assign_lock_key(lock))
756+
return NULL;
757+
} else if (!static_obj(lock->key)) {
754758
return NULL;
755759
}
756760

@@ -3273,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
32733277
return 0;
32743278
}
32753279

3276-
static int __lock_is_held(struct lockdep_map *lock, int read);
3280+
static int __lock_is_held(const struct lockdep_map *lock, int read);
32773281

32783282
/*
32793283
* This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3482,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
34823486
return 0;
34833487
}
34843488

3485-
static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3489+
static int match_held_lock(const struct held_lock *hlock,
3490+
const struct lockdep_map *lock)
34863491
{
34873492
if (hlock->instance == lock)
34883493
return 1;
34893494

34903495
if (hlock->references) {
3491-
struct lock_class *class = lock->class_cache[0];
3496+
const struct lock_class *class = lock->class_cache[0];
34923497

34933498
if (!class)
34943499
class = look_up_lock_class(lock, 0);
@@ -3499,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
34993504
* Clearly if the lock hasn't been acquired _ever_, we're not
35003505
* holding it either, so report failure.
35013506
*/
3502-
if (IS_ERR_OR_NULL(class))
3507+
if (!class)
35033508
return 0;
35043509

35053510
/*
@@ -3724,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
37243729
return 1;
37253730
}
37263731

3727-
static int __lock_is_held(struct lockdep_map *lock, int read)
3732+
static int __lock_is_held(const struct lockdep_map *lock, int read)
37283733
{
37293734
struct task_struct *curr = current;
37303735
int i;
@@ -3938,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
39383943
}
39393944
EXPORT_SYMBOL_GPL(lock_release);
39403945

3941-
int lock_is_held_type(struct lockdep_map *lock, int read)
3946+
int lock_is_held_type(const struct lockdep_map *lock, int read)
39423947
{
39433948
unsigned long flags;
39443949
int ret = 0;
@@ -4295,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
42954300
* If the class exists we look it up and zap it:
42964301
*/
42974302
class = look_up_lock_class(lock, j);
4298-
if (!IS_ERR_OR_NULL(class))
4303+
if (class)
42994304
zap_class(class);
43004305
}
43014306
/*

0 commit comments

Comments
 (0)