
Commit c935cd6

herbertx authored and Peter Zijlstra committed
lockdep: Split header file into lockdep and lockdep_types
There is a header file inclusion loop between asm-generic/bug.h and linux/kernel.h. This causes potential compile failures depending on which file is included first. One way of breaking this loop is to stop spinlock_types.h from including lockdep.h. This patch splits lockdep.h into two files for this purpose.

Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Sergey Senozhatsky <[email protected]>
Reviewed-by: Andy Shevchenko <[email protected]>
Acked-by: Petr Mladek <[email protected]>
Acked-by: Steven Rostedt (VMware) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent: b3a9e3b · commit: c935cd6
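The shape of the fix, for readers who don't want to wade through the whole diff: carve the bare type definitions out into a dependency-free *_types.h header, so that low-level headers can embed the types without pulling in the full API header and everything it drags along. A minimal sketch of the pattern, using hypothetical lock.h / lock_types.h / spin_types.h files rather than the real kernel headers:

/* Hypothetical illustration of the split -- not the actual kernel headers. */

/* lock_types.h: type definitions only; deliberately includes nothing. */
#ifndef LOCK_TYPES_H
#define LOCK_TYPES_H

struct lock_map {			/* stand-in for struct lockdep_map */
	const char *name;
};

#endif /* LOCK_TYPES_H */

/* spin_types.h: embeds the type via the lightweight header. Including
 * the full lock.h here is what previously closed the inclusion loop. */
#ifndef SPIN_TYPES_H
#define SPIN_TYPES_H
#include "lock_types.h"

struct spin_lock {
	int raw;
	struct lock_map dep_map;	/* complete type, no API header needed */
};

#endif /* SPIN_TYPES_H */

/* lock.h: the full API; free to include heavyweight headers. */
#ifndef LOCK_H
#define LOCK_H
#include "lock_types.h"

void lock_map_acquire_check(struct lock_map *map);

#endif /* LOCK_H */

The commit below applies exactly this split: the struct and enum definitions are moved verbatim out of include/linux/lockdep.h into the new include/linux/lockdep_types.h, while the function declarations and the CONFIG_LOCKDEP machinery stay behind.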

File tree: 4 files changed, +200 −177 lines

include/linux/lockdep.h
2 additions, 176 deletions

@@ -10,181 +10,20 @@
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H

+#include <linux/lockdep_types.h>
+
 struct task_struct;
-struct lockdep_map;

 /* for sysctl */
 extern int prove_locking;
 extern int lock_stat;

-#define MAX_LOCKDEP_SUBCLASSES	8UL
-
-#include <linux/types.h>
-
-enum lockdep_wait_type {
-	LD_WAIT_INV = 0,	/* not checked, catch all */
-
-	LD_WAIT_FREE,		/* wait free, rcu etc.. */
-	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */
-
-#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
-	LD_WAIT_CONFIG,		/* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
-#else
-	LD_WAIT_CONFIG = LD_WAIT_SPIN,
-#endif
-	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */
-
-	LD_WAIT_MAX,		/* must be last */
-};
-
 #ifdef CONFIG_LOCKDEP

 #include <linux/linkage.h>
-#include <linux/list.h>
 #include <linux/debug_locks.h>
 #include <linux/stacktrace.h>

-/*
- * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
- * the total number of states... :-(
- */
-#define XXX_LOCK_USAGE_STATES	(1+2*4)
-
-/*
- * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
- * cached in the instance of lockdep_map
- *
- * Currently main class (subclass == 0) and signle depth subclass
- * are cached in lockdep_map. This optimization is mainly targeting
- * on rq->lock. double_rq_lock() acquires this highly competitive with
- * single depth.
- */
-#define NR_LOCKDEP_CACHING_CLASSES	2
-
-/*
- * A lockdep key is associated with each lock object. For static locks we use
- * the lock address itself as the key. Dynamically allocated lock objects can
- * have a statically or dynamically allocated key. Dynamically allocated lock
- * keys must be registered before being used and must be unregistered before
- * the key memory is freed.
- */
-struct lockdep_subclass_key {
-	char __one_byte;
-} __attribute__ ((__packed__));
-
-/* hash_entry is used to keep track of dynamically allocated keys. */
-struct lock_class_key {
-	union {
-		struct hlist_node		hash_entry;
-		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
-	};
-};
-
-extern struct lock_class_key __lockdep_no_validate__;
-
-struct lock_trace;
-
-#define LOCKSTAT_POINTS		4
-
-/*
- * The lock-class itself. The order of the structure members matters.
- * reinit_class() zeroes the key member and all subsequent members.
- */
-struct lock_class {
-	/*
-	 * class-hash:
-	 */
-	struct hlist_node		hash_entry;
-
-	/*
-	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
-	 * when not in use. Instances that are being freed are on one of the
-	 * zapped_classes lists.
-	 */
-	struct list_head		lock_entry;
-
-	/*
-	 * These fields represent a directed graph of lock dependencies,
-	 * to every node we attach a list of "forward" and a list of
-	 * "backward" graph nodes.
-	 */
-	struct list_head		locks_after, locks_before;
-
-	const struct lockdep_subclass_key *key;
-	unsigned int			subclass;
-	unsigned int			dep_gen_id;
-
-	/*
-	 * IRQ/softirq usage tracking bits:
-	 */
-	unsigned long			usage_mask;
-	const struct lock_trace		*usage_traces[XXX_LOCK_USAGE_STATES];
-
-	/*
-	 * Generation counter, when doing certain classes of graph walking,
-	 * to ensure that we check one node only once:
-	 */
-	int				name_version;
-	const char			*name;
-
-	short				wait_type_inner;
-	short				wait_type_outer;
-
-#ifdef CONFIG_LOCK_STAT
-	unsigned long			contention_point[LOCKSTAT_POINTS];
-	unsigned long			contending_point[LOCKSTAT_POINTS];
-#endif
-} __no_randomize_layout;
-
-#ifdef CONFIG_LOCK_STAT
-struct lock_time {
-	s64				min;
-	s64				max;
-	s64				total;
-	unsigned long			nr;
-};
-
-enum bounce_type {
-	bounce_acquired_write,
-	bounce_acquired_read,
-	bounce_contended_write,
-	bounce_contended_read,
-	nr_bounce_types,
-
-	bounce_acquired = bounce_acquired_write,
-	bounce_contended = bounce_contended_write,
-};
-
-struct lock_class_stats {
-	unsigned long			contention_point[LOCKSTAT_POINTS];
-	unsigned long			contending_point[LOCKSTAT_POINTS];
-	struct lock_time		read_waittime;
-	struct lock_time		write_waittime;
-	struct lock_time		read_holdtime;
-	struct lock_time		write_holdtime;
-	unsigned long			bounces[nr_bounce_types];
-};
-
-struct lock_class_stats lock_stats(struct lock_class *class);
-void clear_lock_stats(struct lock_class *class);
-#endif
-
-/*
- * Map the lock object (the lock instance) to the lock-class object.
- * This is embedded into specific lock instances:
- */
-struct lockdep_map {
-	struct lock_class_key		*key;
-	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
-	const char			*name;
-	short				wait_type_outer; /* can be taken in this context */
-	short				wait_type_inner; /* presents this context */
-#ifdef CONFIG_LOCK_STAT
-	int				cpu;
-	unsigned long			ip;
-#endif
-};
-
 static inline void lockdep_copy_map(struct lockdep_map *to,
 				    struct lockdep_map *from)
 {
@@ -440,8 +279,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,

 extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

-struct pin_cookie { unsigned int val; };
-
 #define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

 extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
@@ -520,10 +357,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 # define lockdep_reset()		do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)	do { } while (0)
 # define lockdep_sys_exit()			do { } while (0)
-/*
- * The class key takes no space if lockdep is disabled:
- */
-struct lock_class_key { };

 static inline void lockdep_register_key(struct lock_class_key *key)
 {
@@ -533,11 +366,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key)
 {
 }

-/*
- * The lockdep_map takes no space if lockdep is disabled:
- */
-struct lockdep_map { };
-
 #define lockdep_depth(tsk)	(0)

 #define lockdep_is_held_type(l, r)		(1)
@@ -549,8 +377,6 @@ struct lockdep_map { };

 #define lockdep_recursing(tsk)			(0)

-struct pin_cookie { };
-
 #define NIL_COOKIE (struct pin_cookie){ }

 #define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
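A sketch of the practical effect (a hypothetical consumer, not part of this commit): a header that merely embeds the tracking state can now get by with the lightweight types header, and only code that actually calls into the lockdep API still needs <linux/lockdep.h>.

/* my_driver.h -- hypothetical consumer of the new split. */
#include <linux/lockdep_types.h>	/* struct lockdep_map is complete here */

struct my_driver_lock {
	int owner;
	struct lockdep_map dep_map;	/* embeddable without linux/lockdep.h */
};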
