
Commit 0f181f9

ramosian-glider authored and torvalds committed
mm/slub.c: init_on_free=1 should wipe freelist ptr for bulk allocations
mm/slub.c: init_on_free=1 should wipe freelist ptr for bulk allocations

slab_alloc_node() already zeroed out the freelist pointer if
init_on_free was on.  Thibaut Sautereau noticed that the same needs to
be done for kmem_cache_alloc_bulk(), which performs the allocations
separately.

kmem_cache_alloc_bulk() is currently used in two places in the kernel,
so this change is unlikely to have a major performance impact.

SLAB doesn't require a similar change, as auto-initialization makes the
allocator store the freelist pointers off-slab.

Link: http://lkml.kernel.org/r/[email protected]
Fixes: 6471384 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options")
Signed-off-by: Alexander Potapenko <[email protected]>
Reported-by: Thibaut Sautereau <[email protected]>
Reported-by: Kees Cook <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Laura Abbott <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3c52b0a commit 0f181f9
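
To see why the allocation path must re-zero the freelist word, recall that SLUB keeps the next-free pointer inside the free object at s->offset: even after init_on_free wipes an object, the free path writes that pointer back into it, and it must be cleared again when the object is handed out. The following is a minimal userspace sketch of that mechanism, not kernel code; toy_cache, toy_free and toy_alloc are invented names.

/* Toy model of a SLUB-style in-object freelist with init_on_free. */
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE  64
#define NR_OBJS   4
#define FP_OFFSET 0	/* where the freelist pointer lives (s->offset) */

struct toy_cache {
	char slab[NR_OBJS][OBJ_SIZE];
	void *freelist;
};

static void toy_free(struct toy_cache *s, void *obj)
{
	memset(obj, 0, OBJ_SIZE);	/* init_on_free wipes the object... */
	/* ...but then the freelist pointer is stored back inside it: */
	*(void **)((char *)obj + FP_OFFSET) = s->freelist;
	s->freelist = obj;
}

static void *toy_alloc(struct toy_cache *s)
{
	void *obj = s->freelist;

	if (!obj)
		return NULL;
	s->freelist = *(void **)((char *)obj + FP_OFFSET);
	/* Without this, the stale freelist pointer would be handed to
	 * the caller inside the supposedly wiped object: */
	memset((char *)obj + FP_OFFSET, 0, sizeof(void *));
	return obj;
}

int main(void)
{
	struct toy_cache s = { .freelist = NULL };
	char *p;
	int i;

	for (i = 0; i < NR_OBJS; i++)
		toy_free(&s, s.slab[i]);

	p = toy_alloc(&s);
	for (i = 0; i < OBJ_SIZE; i++) {
		if (p[i]) {
			puts("leak: stale freelist pointer in object");
			return 1;
		}
	}
	puts("object fully zeroed");
	return 0;
}

Removing the memset() in toy_alloc() makes the check fail: the first sizeof(void *) bytes of the returned object still hold the address of the next free object, which is exactly the leak this commit closes for the bulk-allocation path.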

File tree

1 file changed: +16 −6 lines


mm/slub.c

Lines changed: 16 additions & 6 deletions
@@ -2671,6 +2671,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2759,12 +2770,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		prefetch_freepointer(s, next_object);
 		stat(s, ALLOC_FASTPATH);
 	}
-	/*
-	 * If the object has been wiped upon free, make sure it's fully
-	 * initialized by zeroing out freelist pointer.
-	 */
-	if (unlikely(slab_want_init_on_free(s)) && object)
-		memset(object + s->offset, 0, sizeof(void *));
+
+	maybe_wipe_obj_freeptr(s, object);
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 				goto error;
 
 			c = this_cpu_ptr(s->cpu_slab);
+			maybe_wipe_obj_freeptr(s, p[i]);
+
 			continue; /* goto for-loop */
 		}
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
+		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
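
For context, callers use the bulk API patched above roughly as in the sketch below. This is illustrative kernel-style code, not code from this commit or the kernel tree; fill_batch(), my_cache and BATCH are invented names, and it assumes the convention of this era that kmem_cache_alloc_bulk() returns nonzero only when all requested objects were allocated.

/* Hypothetical caller of kmem_cache_alloc_bulk() (sketch only). */
#include <linux/slab.h>

#define BATCH 16

static int fill_batch(struct kmem_cache *my_cache)
{
	void *objs[BATCH];
	int i;

	/* All-or-nothing bulk allocation; with init_on_free=1, each
	 * object must come back fully zeroed -- including the word at
	 * s->offset that held the freelist pointer, which is exactly
	 * what maybe_wipe_obj_freeptr() now guarantees on this path. */
	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, BATCH, objs))
		return -ENOMEM;

	for (i = 0; i < BATCH; i++) {
		/* ... use objs[i] ... */
	}

	kmem_cache_free_bulk(my_cache, BATCH, objs);
	return 0;
}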
