
Commit d50112e

Alexey Dobriyan authored and torvalds committed
slab, slub, slob: add slab_flags_t
Add sparse-checked slab_flags_t for struct kmem_cache::flags (SLAB_POISON, etc).

SLAB is bloated temporarily by switching to "unsigned long", but only temporarily.

Link: http://lkml.kernel.org/r/20171021100225.GA22428@avx2
Signed-off-by: Alexey Dobriyan <[email protected]>
Acked-by: Pekka Enberg <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
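The mechanism behind the change is sparse's __bitwise annotation: a __bitwise typedef is a distinct "restricted" type to sparse, so plain integers can only enter or leave it through an explicit __force cast. Below is a minimal standalone sketch of that pattern, not code from this commit; the names demo_flags_t, DEMO_POISON, and demo_cache_setup are invented for illustration.

/*
 * Builds with a normal compiler (the annotations expand to nothing) and
 * checks cleanly with sparse, which defines __CHECKER__.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned long __bitwise demo_flags_t;

/* The __force cast is paid once, at the constant's definition... */
#define DEMO_POISON	((demo_flags_t __force)0x00000800UL)

static int demo_cache_setup(demo_flags_t flags)
{
	/* ...after which flag tests between demo_flags_t values are clean,
	 * just like the SLAB_NOLEAKTRACE tests in kmemleak.h below. */
	return !(flags & DEMO_POISON);
}

int main(void)
{
	/* demo_cache_setup(0x800UL) would be flagged by sparse: a plain
	 * unsigned long is not a restricted demo_flags_t. */
	return demo_cache_setup(DEMO_POISON);
}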
1 parent a3ba074 commit d50112e

File tree

15 files changed: +97 -81 lines changed

fs/ecryptfs/main.c

Lines changed: 1 addition & 1 deletion
@@ -660,7 +660,7 @@ static struct ecryptfs_cache_info {
 	struct kmem_cache **cache;
 	const char *name;
 	size_t size;
-	unsigned long flags;
+	slab_flags_t flags;
 	void (*ctor)(void *obj);
 } ecryptfs_cache_infos[] = {
 	{

fs/xfs/kmem.h

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ kmem_zone_init(int size, char *zone_name)
 }
 
 static inline kmem_zone_t *
-kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
+kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
 		     void (*construct)(void *))
 {
 	return kmem_cache_create(zone_name, size, 0, flags, construct);

include/linux/kasan.h

Lines changed: 2 additions & 2 deletions
@@ -46,7 +46,7 @@ void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
 void kasan_cache_create(struct kmem_cache *cache, size_t *size,
-			unsigned long *flags);
+			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);

@@ -95,7 +95,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      size_t *size,
-				      unsigned long *flags) {}
+				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}

include/linux/kmemleak.h

Lines changed: 4 additions & 4 deletions
@@ -48,14 +48,14 @@ extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
 extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-					    int min_count, unsigned long flags,
+					    int min_count, slab_flags_t flags,
 					    gfp_t gfp)
 {
 	if (!(flags & SLAB_NOLEAKTRACE))
 		kmemleak_alloc(ptr, size, min_count, gfp);
 }
 
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
 	if (!(flags & SLAB_NOLEAKTRACE))
 		kmemleak_free(ptr);

@@ -76,7 +76,7 @@ static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
 {
 }
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
-					    int min_count, unsigned long flags,
+					    int min_count, slab_flags_t flags,
 					    gfp_t gfp)
 {
 }

@@ -94,7 +94,7 @@ static inline void kmemleak_free(const void *ptr)
 static inline void kmemleak_free_part(const void *ptr, size_t size)
 {
 }
-static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags)
 {
 }
 static inline void kmemleak_free_percpu(const void __percpu *ptr)

include/linux/slab.h

Lines changed: 37 additions & 23 deletions
@@ -21,13 +21,20 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
-#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
-#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
-#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
-#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
+/* DEBUG: Perform (expensive) checks on alloc/free */
+#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100UL)
+/* DEBUG: Red zone objs in a cache */
+#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400UL)
+/* DEBUG: Poison objects */
+#define SLAB_POISON		((slab_flags_t __force)0x00000800UL)
+/* Align objs on cache lines */
+#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000UL)
+/* Use GFP_DMA memory */
+#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000UL)
+/* DEBUG: Store the last owner for bug hunting */
+#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000UL)
+/* Panic if kmem_cache_create() fails */
+#define SLAB_PANIC		((slab_flags_t __force)0x00040000UL)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *

@@ -65,44 +72,51 @@
  *
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
-#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
-#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
-#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
+/* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000UL)
+/* Spread some memory over cpuset */
+#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000UL)
+/* Trace allocations and frees */
+#define SLAB_TRACE		((slab_flags_t __force)0x00200000UL)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS	0x00400000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000UL)
 #else
-# define SLAB_DEBUG_OBJECTS	0x00000000UL
+# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00000000UL)
 #endif
 
-#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */
+/* Avoid kmemleak tracing */
+#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000UL)
 
 /* Don't track use of uninitialized memory */
 #ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK		0x01000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x01000000UL)
 #else
-# define SLAB_NOTRACK		0x00000000UL
+# define SLAB_NOTRACK		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000UL)
 #else
-# define SLAB_FAILSLAB		0x00000000UL
+# define SLAB_FAILSLAB		((slab_flags_t __force)0x00000000UL)
 #endif
+/* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000UL)
 #else
-# define SLAB_ACCOUNT		0x00000000UL
+# define SLAB_ACCOUNT		((slab_flags_t __force)0x00000000UL)
 #endif
 
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN		0x08000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x08000000UL)
#else
-#define SLAB_KASAN		0x00000000UL
+#define SLAB_KASAN		((slab_flags_t __force)0x00000000UL)
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
-#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
+/* Objects are reclaimable */
+#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000UL)
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.

@@ -128,7 +142,7 @@ void __init kmem_cache_init(void);
 bool slab_is_available(void);
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-			unsigned long,
+			slab_flags_t,
 			void (*)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
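Callers of kmem_cache_create() need no adjustment for this hunk: OR-ing two slab_flags_t constants yields another slab_flags_t, so existing flag combinations already match the new prototype under sparse. A hypothetical call site, not part of the diff (struct demo_obj, demo_cachep, and demo_init are invented):

#include <linux/init.h>
#include <linux/slab.h>

/* Sketch of an unchanged cache user. */
struct demo_obj {
	int payload;
};

static struct kmem_cache *demo_cachep;

static int __init demo_init(void)
{
	/* Both operands of | are slab_flags_t, so the OR is too, and the
	 * fourth argument matches the new kmem_cache_create() prototype. */
	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
					0, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
					NULL);
	return demo_cachep ? 0 : -ENOMEM;
}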

include/linux/slab_def.h

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ struct kmem_cache {
 	struct reciprocal_value reciprocal_buffer_size;
 /* 2) touched by every alloc & free from the backend */
 
-	unsigned int flags;		/* constant flags */
+	slab_flags_t flags;		/* constant flags */
 	unsigned int num;		/* # of objs per slab */
 
 /* 3) cache_grow/shrink */

include/linux/slub_def.h

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ struct kmem_cache_order_objects {
 struct kmem_cache {
 	struct kmem_cache_cpu __percpu *cpu_slab;
 	/* Used for retriving partial slabs etc */
-	unsigned long flags;
+	slab_flags_t flags;
 	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
 	int object_size;	/* The size of an object without meta data */

include/linux/types.h

Lines changed: 1 addition & 0 deletions
@@ -156,6 +156,7 @@ typedef u32 dma_addr_t;
 #endif
 
 typedef unsigned __bitwise gfp_t;
+typedef unsigned long __bitwise slab_flags_t;
 typedef unsigned __bitwise fmode_t;
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
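Unlike gfp_t just above it, the new typedef is based on unsigned long: SLUB's struct kmem_cache::flags (slub_def.h above) was already unsigned long, and matching that width is the temporary SLAB "bloat" the commit message mentions, since SLAB's field (slab_def.h above) grows from a 32-bit unsigned int. A hedged standalone check of the width difference, with invented *_demo names (in a real tree the typedef is exercised by running sparse, e.g. via make C=1):

#include <stdio.h>

/* Outside the kernel the __bitwise annotation compiles away, so the
 * typedefs reduce to their base types and sizeof() shows the widths. */
typedef unsigned gfp_t_demo;			/* gfp_t: unsigned __bitwise */
typedef unsigned long slab_flags_t_demo;	/* slab_flags_t: unsigned long __bitwise */

int main(void)
{
	printf("gfp_t-like: %zu bytes, slab_flags_t-like: %zu bytes\n",
	       sizeof(gfp_t_demo), sizeof(slab_flags_t_demo));
	return 0;
}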

include/net/sock.h

Lines changed: 1 addition & 1 deletion
@@ -1105,7 +1105,7 @@ struct proto {
 
 	struct kmem_cache	*slab;
 	unsigned int		obj_size;
-	int			slab_flags;
+	slab_flags_t		slab_flags;
 
 	struct percpu_counter	*orphan_count;
 

mm/kasan/kasan.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -337,7 +337,7 @@ static size_t optimal_redzone(size_t object_size)
337337
}
338338

339339
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
340-
unsigned long *flags)
340+
slab_flags_t *flags)
341341
{
342342
int redzone_adjust;
343343
int orig_size = *size;

mm/slab.c

Lines changed: 11 additions & 12 deletions
@@ -252,8 +252,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
 	} while (0)
 
-#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
-#define CFLGS_OFF_SLAB		(0x80000000UL)
+#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000UL)
+#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000UL)
 #define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
 

@@ -441,7 +441,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
  * Calculate the number of objects and left-over bytes for a given buffer size.
  */
 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
-		unsigned long flags, size_t *left_over)
+		slab_flags_t flags, size_t *left_over)
 {
 	unsigned int num;
 	size_t slab_size = PAGE_SIZE << gfporder;

@@ -1759,7 +1759,7 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
  * towards high-order requests, this should be changed.
  */
 static size_t calculate_slab_order(struct kmem_cache *cachep,
-				size_t size, unsigned long flags)
+				size_t size, slab_flags_t flags)
 {
 	size_t left_over = 0;
 	int gfporder;

@@ -1886,16 +1886,16 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return 0;
 }
 
-unsigned long kmem_cache_flags(unsigned long object_size,
-	unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+	slab_flags_t flags, const char *name,
 	void (*ctor)(void *))
 {
 	return flags;
 }
 
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
-		   unsigned long flags, void (*ctor)(void *))
+		   slab_flags_t flags, void (*ctor)(void *))
 {
 	struct kmem_cache *cachep;
 

@@ -1913,7 +1913,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 }
 
 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
-			size_t size, unsigned long flags)
+			size_t size, slab_flags_t flags)
 {
 	size_t left;
 

@@ -1936,7 +1936,7 @@ static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
 }
 
 static bool set_off_slab_cache(struct kmem_cache *cachep,
-			size_t size, unsigned long flags)
+			size_t size, slab_flags_t flags)
 {
 	size_t left;
 

@@ -1970,7 +1970,7 @@ static bool set_off_slab_cache(struct kmem_cache *cachep,
 }
 
 static bool set_on_slab_cache(struct kmem_cache *cachep,
-			size_t size, unsigned long flags)
+			size_t size, slab_flags_t flags)
 {
 	size_t left;
 

@@ -2006,8 +2006,7 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
  * cacheline. This can be beneficial if you're counting cycles as closely
  * as davem.
  */
-int
-__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 {
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;

mm/slab.h

Lines changed: 13 additions & 13 deletions
@@ -21,7 +21,7 @@ struct kmem_cache {
 	unsigned int object_size;/* The original size of the object */
 	unsigned int size;	/* The aligned/padded/added on size */
 	unsigned int align;	/* Alignment as calculated */
-	unsigned long flags;	/* Active flags on the slab */
+	slab_flags_t flags;	/* Active flags on the slab */
 	const char *name;	/* Slab name for sysfs */
 	int refcount;		/* Use counter */
 	void (*ctor)(void *);	/* Called on object slot creation */

@@ -79,46 +79,46 @@ extern const struct kmalloc_info_struct {
 	unsigned long size;
 } kmalloc_info[];
 
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
 		unsigned long align, unsigned long size);
 
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
-void create_kmalloc_caches(unsigned long);
+void create_kmalloc_caches(slab_flags_t);
 
 /* Find the kmalloc slab corresponding for a certain size */
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif
 
 
 /* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
+int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
 
 extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
-			unsigned long flags);
+			slab_flags_t flags);
 extern void create_boot_cache(struct kmem_cache *, const char *name,
-			size_t size, unsigned long flags);
+			size_t size, slab_flags_t flags);
 
 int slab_unmergeable(struct kmem_cache *s);
 struct kmem_cache *find_mergeable(size_t size, size_t align,
-		unsigned long flags, const char *name, void (*ctor)(void *));
+		slab_flags_t flags, const char *name, void (*ctor)(void *));
 #ifndef CONFIG_SLOB
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
-		   unsigned long flags, void (*ctor)(void *));
+		   slab_flags_t flags, void (*ctor)(void *));
 
-unsigned long kmem_cache_flags(unsigned long object_size,
-	unsigned long flags, const char *name,
+slab_flags_t kmem_cache_flags(unsigned long object_size,
+	slab_flags_t flags, const char *name,
 	void (*ctor)(void *));
 #else
 static inline struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
-		   unsigned long flags, void (*ctor)(void *))
+		   slab_flags_t flags, void (*ctor)(void *))
 { return NULL; }
 
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
-	unsigned long flags, const char *name,
+static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
+	slab_flags_t flags, const char *name,
 	void (*ctor)(void *))
 {
 	return flags;
