Skip to content

Commit 67f2df3

Browse files
kees authored and tehcaster committed
mm/slab: Plumb kmem_buckets into __do_kmalloc_node()
Introduce CONFIG_SLAB_BUCKETS which provides the infrastructure to support separated kmalloc buckets (in the following kmem_buckets_create() patches and future codetag-based separation). Since this will provide a mitigation for a very common case of exploits, it is recommended to enable this feature for general purpose distros. By default, the new Kconfig will be enabled if CONFIG_SLAB_FREELIST_HARDENED is enabled (and it is added to the hardening.config Kconfig fragment). To be able to choose which buckets to allocate from, make the buckets available to the internal kmalloc interfaces by adding them as the second argument, rather than depending on the buckets being chosen from the fixed set of global buckets. Where the bucket is not available, pass NULL, which means "use the default system kmalloc bucket set" (the prior existing behavior), as implemented in kmalloc_slab(). To avoid adding the extra argument when !CONFIG_SLAB_BUCKETS, only the top-level macros and static inlines use the buckets argument (where they are stripped out and compiled out respectively). The actual extern functions can then be built without the argument, and the internals fall back to the global kmalloc buckets unconditionally. Co-developed-by: Vlastimil Babka <[email protected]> Signed-off-by: Kees Cook <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 72e0fe2 commit 67f2df3

File tree

7 files changed

+56
-18
lines changed

7 files changed

+56
-18
lines changed

include/linux/slab.h

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -570,6 +570,21 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
570570
int node) __assume_slab_alignment __malloc;
571571
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
572572

573+
/*
574+
* These macros allow declaring a kmem_buckets * parameter alongside size, which
575+
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
576+
* sites don't have to pass NULL.
577+
*/
578+
#ifdef CONFIG_SLAB_BUCKETS
579+
#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
580+
#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
581+
#define PASS_BUCKET_PARAM(_b) (_b)
582+
#else
583+
#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
584+
#define PASS_BUCKET_PARAMS(_size, _b) (_size)
585+
#define PASS_BUCKET_PARAM(_b) NULL
586+
#endif
587+
573588
/*
574589
* The following functions are not to be used directly and are intended only
575590
* for internal use from kmalloc() and kmalloc_node()
@@ -579,7 +594,7 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
579594
void *__kmalloc_noprof(size_t size, gfp_t flags)
580595
__assume_kmalloc_alignment __alloc_size(1);
581596

582-
void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
597+
void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
583598
__assume_kmalloc_alignment __alloc_size(1);
584599

585600
void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
@@ -680,7 +695,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
680695
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
681696
flags, node, size);
682697
}
683-
return __kmalloc_node_noprof(size, flags, node);
698+
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
684699
}
685700
#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
686701

@@ -731,8 +746,10 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
731746
*/
732747
#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
733748

734-
void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
735-
unsigned long caller) __alloc_size(1);
749+
void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
750+
unsigned long caller) __alloc_size(1);
751+
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
752+
__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
736753
#define kmalloc_node_track_caller(...) \
737754
alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
738755

@@ -758,7 +775,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
758775
return NULL;
759776
if (__builtin_constant_p(n) && __builtin_constant_p(size))
760777
return kmalloc_node_noprof(bytes, flags, node);
761-
return __kmalloc_node_noprof(bytes, flags, node);
778+
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
762779
}
763780
#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
764781

kernel/configs/hardening.config

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ CONFIG_RANDOMIZE_MEMORY=y
2020
# Randomize allocator freelists, harden metadata.
2121
CONFIG_SLAB_FREELIST_RANDOM=y
2222
CONFIG_SLAB_FREELIST_HARDENED=y
23+
CONFIG_SLAB_BUCKETS=y
2324
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
2425
CONFIG_RANDOM_KMALLOC_CACHES=y
2526

mm/Kconfig

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,23 @@ config SLAB_FREELIST_HARDENED
273273
sacrifices to harden the kernel slab allocator against common
274274
freelist exploit methods.
275275

276+
config SLAB_BUCKETS
277+
bool "Support allocation from separate kmalloc buckets"
278+
depends on !SLUB_TINY
279+
default SLAB_FREELIST_HARDENED
280+
help
281+
Kernel heap attacks frequently depend on being able to create
282+
specifically-sized allocations with user-controlled contents
283+
that will be allocated into the same kmalloc bucket as a
284+
target object. To avoid sharing these allocation buckets,
285+
provide an explicitly separated set of buckets to be used for
286+
user-controlled allocations. This may very slightly increase
287+
memory fragmentation, though in practice it's only a handful
288+
of extra pages since the bulk of user-controlled allocations
289+
are relatively long-lived.
290+
291+
If unsure, say Y.
292+
276293
config SLUB_STATS
277294
default n
278295
bool "Enable performance statistics"

mm/slab.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -403,16 +403,18 @@ static inline unsigned int size_index_elem(unsigned int bytes)
403403
* KMALLOC_MAX_CACHE_SIZE and the caller must check that.
404404
*/
405405
static inline struct kmem_cache *
406-
kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
406+
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
407407
{
408408
unsigned int index;
409409

410+
if (!b)
411+
b = &kmalloc_caches[kmalloc_type(flags, caller)];
410412
if (size <= 192)
411413
index = kmalloc_size_index[size_index_elem(size)];
412414
else
413415
index = fls(size - 1);
414416

415-
return kmalloc_caches[kmalloc_type(flags, caller)][index];
417+
return (*b)[index];
416418
}
417419

418420
gfp_t kmalloc_fix_flags(gfp_t flags);

mm/slab_common.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -703,7 +703,7 @@ size_t kmalloc_size_roundup(size_t size)
703703
* The flags don't matter since size_index is common to all.
704704
* Neither does the caller for just getting ->object_size.
705705
*/
706-
return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
706+
return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
707707
}
708708

709709
/* Above the smaller buckets, size is a multiple of page size. */

mm/slub.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4117,7 +4117,7 @@ void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
41174117
EXPORT_SYMBOL(__kmalloc_large_node_noprof);
41184118

41194119
static __always_inline
4120-
void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
4120+
void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
41214121
unsigned long caller)
41224122
{
41234123
struct kmem_cache *s;
@@ -4133,32 +4133,32 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
41334133
if (unlikely(!size))
41344134
return ZERO_SIZE_PTR;
41354135

4136-
s = kmalloc_slab(size, flags, caller);
4136+
s = kmalloc_slab(size, b, flags, caller);
41374137

41384138
ret = slab_alloc_node(s, NULL, flags, node, caller, size);
41394139
ret = kasan_kmalloc(s, ret, size, flags);
41404140
trace_kmalloc(caller, ret, size, s->size, flags, node);
41414141
return ret;
41424142
}
4143-
4144-
void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
4143+
void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
41454144
{
4146-
return __do_kmalloc_node(size, flags, node, _RET_IP_);
4145+
return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
41474146
}
41484147
EXPORT_SYMBOL(__kmalloc_node_noprof);
41494148

41504149
void *__kmalloc_noprof(size_t size, gfp_t flags)
41514150
{
4152-
return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
4151+
return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
41534152
}
41544153
EXPORT_SYMBOL(__kmalloc_noprof);
41554154

4156-
void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
4157-
int node, unsigned long caller)
4155+
void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4156+
int node, unsigned long caller)
41584157
{
4159-
return __do_kmalloc_node(size, flags, node, caller);
4158+
return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4159+
41604160
}
4161-
EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
4161+
EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
41624162

41634163
void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
41644164
{

scripts/kernel-doc

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1729,6 +1729,7 @@ sub dump_function($$) {
17291729
$prototype =~ s/__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +//;
17301730
$prototype =~ s/__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +//;
17311731
$prototype =~ s/__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +//;
1732+
$prototype =~ s/DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)/$1, $2/;
17321733
my $define = $prototype =~ s/^#\s*define\s+//; #ak added
17331734
$prototype =~ s/__attribute_const__ +//;
17341735
$prototype =~ s/__attribute__\s*\(\(

0 commit comments

Comments
 (0)