Skip to content

Commit 108c148

Browse files
bvanassche authored and Ingo Molnar committed
locking/lockdep: Add support for dynamic keys
A shortcoming of the current lockdep implementation is that it requires lock keys to be allocated statically. That forces all instances of lock objects that occur in a given data structure to share a lock key. Since lock dependency analysis groups lock objects per key sharing lock keys can cause false positive lockdep reports. Make it possible to avoid such false positive reports by allowing lock keys to be allocated dynamically. Require that dynamically allocated lock keys are registered before use by calling lockdep_register_key(). Complain about attempts to register the same lock key pointer twice without calling lockdep_unregister_key() between successive registration calls. The purpose of the new lock_keys_hash[] data structure that keeps track of all dynamic keys is twofold: - Verify whether the lockdep_register_key() and lockdep_unregister_key() functions are used correctly. - Avoid that lockdep_init_map() complains when encountering a dynamically allocated key. Signed-off-by: Bart Van Assche <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Johannes Berg <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Waiman Long <[email protected]> Cc: Will Deacon <[email protected]> Cc: [email protected] Cc: [email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 4bf5086 commit 108c148

File tree

2 files changed

+131
-11
lines changed

2 files changed

+131
-11
lines changed

include/linux/lockdep.h

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -46,15 +46,19 @@ extern int lock_stat;
4646
#define NR_LOCKDEP_CACHING_CLASSES 2
4747

4848
/*
49-
* Lock-classes are keyed via unique addresses, by embedding the
50-
* lockclass-key into the kernel (or module) .data section. (For
51-
* static locks we use the lock address itself as the key.)
49+
* A lockdep key is associated with each lock object. For static locks we use
50+
* the lock address itself as the key. Dynamically allocated lock objects can
51+
* have a statically or dynamically allocated key. Dynamically allocated lock
52+
* keys must be registered before being used and must be unregistered before
53+
* the key memory is freed.
5254
*/
5355
struct lockdep_subclass_key {
5456
char __one_byte;
5557
} __attribute__ ((__packed__));
5658

59+
/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	/*
	 * Links a dynamically allocated key into lock_keys_hash[] once
	 * lockdep_register_key() has been called on it; unused for
	 * statically allocated keys.
	 */
	struct hlist_node		hash_entry;
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
6064

@@ -273,6 +277,9 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
273277
extern void lockdep_off(void);
274278
extern void lockdep_on(void);
275279

280+
extern void lockdep_register_key(struct lock_class_key *key);
281+
extern void lockdep_unregister_key(struct lock_class_key *key);
282+
276283
/*
277284
* These methods are used by specific locking variants (spinlocks,
278285
* rwlocks, mutexes and rwsems) to pass init/acquire/release events
@@ -434,6 +441,14 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
434441
*/
435442
struct lock_class_key { };
436443

444+
/* No-op when lockdep is disabled (CONFIG_LOCKDEP=n). */
static inline void lockdep_register_key(struct lock_class_key *key)
{
}
447+
448+
/* No-op when lockdep is disabled (CONFIG_LOCKDEP=n). */
static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}
451+
437452
/*
438453
* The lockdep_map takes no space if lockdep is disabled:
439454
*/

kernel/locking/lockdep.c

Lines changed: 113 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
143143
* nr_lock_classes is the number of elements of lock_classes[] that is
144144
* in use.
145145
*/
146+
/*
 * Hash table that keeps track of all registered dynamically allocated lock
 * keys, bucketed by pointer value (see keyhashentry()).
 */
#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
146149
unsigned long nr_lock_classes;
147150
#ifndef CONFIG_DEBUG_LOCKDEP
148151
static
@@ -641,7 +644,7 @@ static int very_verbose(struct lock_class *class)
641644
* Is this the address of a static object:
642645
*/
643646
#ifdef __KERNEL__
644-
static int static_obj(void *obj)
647+
static int static_obj(const void *obj)
645648
{
646649
unsigned long start = (unsigned long) &_stext,
647650
end = (unsigned long) &_end,
@@ -975,6 +978,71 @@ static void init_data_structures_once(void)
975978
}
976979
}
977980

981+
static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
982+
{
983+
unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
984+
985+
return lock_keys_hash + hash;
986+
}
987+
988+
/* Register a dynamically allocated key. */
void lockdep_register_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	unsigned long flags;

	/* Dynamic keys must not live in static (.data/.bss/.rodata) memory. */
	if (WARN_ON_ONCE(static_obj(key)))
		return;
	hash_head = keyhashentry(key);

	/* graph_lock() requires interrupts to be disabled. */
	raw_local_irq_save(flags);
	if (!graph_lock())
		goto restore_irqs;
	/* Complain about double registration without an intervening unregister. */
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (WARN_ON_ONCE(k == key))
			goto out_unlock;
	}
	/* RCU publication so is_dynamic_key() can traverse locklessly. */
	hlist_add_head_rcu(&key->hash_entry, hash_head);
out_unlock:
	graph_unlock();
restore_irqs:
	raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);
1013+
1014+
/* Check whether a key has been registered as a dynamic key. */
static bool is_dynamic_key(const struct lock_class_key *key)
{
	struct hlist_head *hash_head;
	struct lock_class_key *k;
	bool found = false;

	/* Static keys are never entered into lock_keys_hash[]. */
	if (WARN_ON_ONCE(static_obj(key)))
		return false;

	/*
	 * If lock debugging is disabled lock_keys_hash[] may contain
	 * pointers to memory that has already been freed. Avoid triggering
	 * a use-after-free in that case by returning early.
	 */
	if (!debug_locks)
		return true;

	hash_head = keyhashentry(key);

	/* Lockless lookup; pairs with hlist_add_head_rcu() in register. */
	rcu_read_lock();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
1045+
9781046
/*
9791047
* Register a lock's class in the hash-table, if the class is not present
9801048
* yet. Otherwise we look it up. We cache the result in the lock object
@@ -996,7 +1064,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
9961064
if (!lock->key) {
9971065
if (!assign_lock_key(lock))
9981066
return NULL;
999-
} else if (!static_obj(lock->key)) {
1067+
} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
10001068
return NULL;
10011069
}
10021070

@@ -3378,13 +3446,12 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
33783446
if (DEBUG_LOCKS_WARN_ON(!key))
33793447
return;
33803448
/*
3381-
* Sanity check, the lock-class key must be persistent:
3449+
* Sanity check, the lock-class key must either have been allocated
3450+
* statically or must have been registered as a dynamic key.
33823451
*/
3383-
if (!static_obj(key)) {
3384-
printk("BUG: key %px not in .data!\n", key);
3385-
/*
3386-
* What it says above ^^^^^, I suggest you read it.
3387-
*/
3452+
if (!static_obj(key) && !is_dynamic_key(key)) {
3453+
if (debug_locks)
3454+
printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
33883455
DEBUG_LOCKS_WARN_ON(1);
33893456
return;
33903457
}
@@ -4795,6 +4862,44 @@ void lockdep_reset_lock(struct lockdep_map *lock)
47954862
lockdep_reset_lock_reg(lock);
47964863
}
47974864

4865+
/* Unregister a dynamically allocated key. */
void lockdep_unregister_key(struct lock_class_key *key)
{
	struct hlist_head *hash_head = keyhashentry(key);
	struct lock_class_key *k;
	struct pending_free *pf;
	unsigned long flags;
	bool found = false;

	/* synchronize_rcu() below may sleep; must be called from task context. */
	might_sleep();

	if (WARN_ON_ONCE(static_obj(key)))
		return;

	raw_local_irq_save(flags);
	if (!graph_lock())
		goto out_irq;

	pf = get_pending_free();
	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
		if (k == key) {
			hlist_del_rcu(&k->hash_entry);
			found = true;
			break;
		}
	}
	/* Unregistering a key that was never registered is a caller bug. */
	WARN_ON_ONCE(!found);
	/* Zap all lock classes whose key falls inside [key, key + 1). */
	__lockdep_free_key_range(pf, key, 1);
	call_rcu_zapped(pf);
	graph_unlock();
out_irq:
	raw_local_irq_restore(flags);

	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(lockdep_unregister_key);
4902+
47984903
void __init lockdep_init(void)
47994904
{
48004905
printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

0 commit comments

Comments
 (0)