@@ -22,19 +22,19 @@
  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  */
 /* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100UL)
+#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
 /* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400UL)
+#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
 /* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800UL)
+#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
 /* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000UL)
+#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000UL)
+#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
 /* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000UL)
+#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000UL)
+#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
 /*
  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  *
@@ -73,50 +73,50 @@
  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  */
 /* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000UL)
+#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
 /* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000UL)
+#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
 /* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000UL)
+#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
 
 /* Flag to prevent checks on free */
 #ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000UL)
+# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
 #else
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00000000UL)
+# define SLAB_DEBUG_OBJECTS 0
 #endif
 
 /* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000UL)
+#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
 
 /* Don't track use of uninitialized memory */
 #ifdef CONFIG_KMEMCHECK
-# define SLAB_NOTRACK ((slab_flags_t __force)0x01000000UL)
+# define SLAB_NOTRACK ((slab_flags_t __force)0x01000000U)
 #else
-# define SLAB_NOTRACK ((slab_flags_t __force)0x00000000UL)
+# define SLAB_NOTRACK 0
 #endif
 /* Fault injection mark */
 #ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000UL)
+# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
 #else
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x00000000UL)
+# define SLAB_FAILSLAB 0
 #endif
 /* Account to memcg */
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000UL)
+# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
 #else
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x00000000UL)
+# define SLAB_ACCOUNT 0
 #endif
 
 #ifdef CONFIG_KASAN
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000UL)
+#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
 #else
-#define SLAB_KASAN ((slab_flags_t __force)0x00000000UL)
+#define SLAB_KASAN 0
 #endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 /* Objects are reclaimable */
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000UL)
+#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
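
The mechanical change is the same on every line above: the `UL` literals become `U` and the disabled-config fallbacks become plain `0`, because `slab_flags_t` is a sparse "bitwise" type with a 32-bit backing type rather than an `unsigned long`. The typedef itself is not part of this hunk; as a rough sketch of the construct being relied on (the real definitions live elsewhere in the tree, in include/linux/types.h and the compiler annotation headers in mainline), it looks like:

    /* Sketch only -- not the lines touched by this patch. */
    #ifdef __CHECKER__                      /* defined when running sparse */
    # define __bitwise __attribute__((bitwise))
    # define __force   __attribute__((force))
    #else
    # define __bitwise
    # define __force
    #endif

    /* A 32-bit bitwise type: plain integers may not be mixed in silently. */
    typedef unsigned int __bitwise slab_flags_t;

The `(slab_flags_t __force)` casts mint the flag constants in that restricted type, and sparse treats a literal `0` as compatible with any bitwise type, which is why the #else branches can drop the cast entirely and just say `0`.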
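
From a caller's point of view nothing changes: the flags are still OR-ed together and passed to kmem_cache_create(), and since every constant now carries the same slab_flags_t type the combination stays sparse-clean. A hypothetical user (the `foo` cache below is invented purely for illustration and is not part of the patch) would look roughly like:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    /* Illustrative object type, not from the patch. */
    struct foo {
            int id;
            char name[32];
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_cache_init(void)
    {
            /*
             * SLAB_HWCACHE_ALIGN and SLAB_ACCOUNT are flags from the hunk
             * above; OR-ing slab_flags_t values needs no further casts.
             */
            foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
                                          SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                          NULL);
            return foo_cache ? 0 : -ENOMEM;
    }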