Skip to content

Commit 782f890

Browse files
committed
mm/slub: free KFENCE objects in slab_free_hook()
When freeing an object that was allocated from KFENCE, we do that in the slowpath __slab_free(), relying on the fact that KFENCE "slab" cannot be the cpu slab, so the fastpath has to fallback to the slowpath. This optimization doesn't help much though, because is_kfence_address() is checked earlier anyway during the free hook processing or detached freelist building. Thus we can simplify the code by making the slab_free_hook() free the KFENCE object immediately, similarly to KASAN quarantine. In slab_free_hook() we can place kfence_free() above init processing, as callers have been making sure to set init to false for KFENCE objects. This simplifies slab_free(). This places it also above kasan_slab_free() which is ok as that skips KFENCE objects anyway. While at it also determine the init value in slab_free_freelist_hook() outside of the loop. This change will also make introducing per cpu array caches easier. Tested-by: Marco Elver <[email protected]> Reviewed-by: Chengming Zhou <[email protected]> Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 284f17a commit 782f890

File tree

1 file changed

+10
-12
lines changed

1 file changed

+10
-12
lines changed

mm/slub.c

Lines changed: 10 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -2053,7 +2053,7 @@ void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects,
20532053
* production configuration these hooks all should produce no code at all.
20542054
*
20552055
* Returns true if freeing of the object can proceed, false if its reuse
2056-
* was delayed by KASAN quarantine.
2056+
* was delayed by KASAN quarantine, or it was returned to KFENCE.
20572057
*/
20582058
static __always_inline
20592059
bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
@@ -2071,6 +2071,9 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
20712071
__kcsan_check_access(x, s->object_size,
20722072
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
20732073

2074+
if (kfence_free(x))
2075+
return false;
2076+
20742077
/*
20752078
* As memory initialization might be integrated into KASAN,
20762079
* kasan_slab_free and initialization memset's must be
@@ -2100,23 +2103,25 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
21002103
void *object;
21012104
void *next = *head;
21022105
void *old_tail = *tail;
2106+
bool init;
21032107

21042108
if (is_kfence_address(next)) {
21052109
slab_free_hook(s, next, false);
2106-
return true;
2110+
return false;
21072111
}
21082112

21092113
/* Head and tail of the reconstructed freelist */
21102114
*head = NULL;
21112115
*tail = NULL;
21122116

2117+
init = slab_want_init_on_free(s);
2118+
21132119
do {
21142120
object = next;
21152121
next = get_freepointer(s, object);
21162122

21172123
/* If object's reuse doesn't have to be delayed */
2118-
if (likely(slab_free_hook(s, object,
2119-
slab_want_init_on_free(s)))) {
2124+
if (likely(slab_free_hook(s, object, init))) {
21202125
/* Move object to the new freelist */
21212126
set_freepointer(s, object, *head);
21222127
*head = object;
@@ -4117,9 +4122,6 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
41174122

41184123
stat(s, FREE_SLOWPATH);
41194124

4120-
if (kfence_free(head))
4121-
return;
4122-
41234125
if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
41244126
free_to_partial_list(s, slab, head, tail, cnt, addr);
41254127
return;
@@ -4304,13 +4306,9 @@ static __fastpath_inline
43044306
void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
43054307
unsigned long addr)
43064308
{
4307-
bool init;
4308-
43094309
memcg_slab_free_hook(s, slab, &object, 1);
43104310

4311-
init = !is_kfence_address(object) && slab_want_init_on_free(s);
4312-
4313-
if (likely(slab_free_hook(s, object, init)))
4311+
if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
43144312
do_slab_free(s, slab, object, object, 1, addr);
43154313
}
43164314

0 commit comments

Comments (0)