
Commit aa1ef4d

xairy authored and torvalds committed
kasan, mm: reset tags when accessing metadata
Kernel allocator code accesses metadata for slab objects that may lie out of bounds of the object itself, or may be accessed after the object has been freed. Such accesses trigger tag faults and lead to false-positive reports with hardware tag-based KASAN.

Software KASAN modes disable instrumentation for allocator code via the KASAN_SANITIZE Makefile macro and rely on kasan_enable/disable_current() annotations to ignore KASAN reports.

With hardware tag-based KASAN neither of those options is available: it doesn't use compiler instrumentation, no tag faults are ignored, and MTE is disabled after the first one.

Instead, reset tags when accessing metadata (currently only for SLUB).

Link: https://lkml.kernel.org/r/a0f3cefbc49f34c843b664110842de4db28179d0.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Signed-off-by: Vincenzo Frascino <[email protected]>
Acked-by: Marco Elver <[email protected]>
Reviewed-by: Alexander Potapenko <[email protected]>
Tested-by: Vincenzo Frascino <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Branislav Rankov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Evgenii Stepanov <[email protected]>
Cc: Kevin Brodsky <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 4291e9e commit aa1ef4d
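
For context on the fix itself: with the tag-based KASAN modes every pointer carries a small tag in its unused top bits, and a load or store faults when the pointer tag does not match the tag assigned to the memory. kasan_reset_tag() replaces the pointer's tag with the match-all value so the access is never checked. Below is a minimal sketch of that idea, assuming the arm64 convention of an 8-bit tag in bits 63:56 and 0xff as the match-all tag; this is not the kernel's implementation (which lives in the arch headers), just the shape of it:

#include <stdint.h>

#define KASAN_TAG_SHIFT	56		/* arm64 keeps the tag in the top byte  */
#define KASAN_TAG_ALL	0xffULL		/* match-all tag: accesses never fault  */

/* Illustrative equivalent of kasan_reset_tag(): keep the address bits,
 * force the tag to the match-all value. */
static inline void *reset_tag_sketch(const void *ptr)
{
	uint64_t addr = (uint64_t)ptr;

	addr |= KASAN_TAG_ALL << KASAN_TAG_SHIFT;
	return (void *)addr;
}

Any memset(), memchr_inv() or freelist dereference done through the returned pointer then behaves like an untagged access, which is what the hunks below rely on.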

File tree

3 files changed: +20 −15 lines

mm/page_alloc.c

Lines changed: 3 additions & 1 deletion
@@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
-	for (i = 0; i < numpages; i++)
+	for (i = 0; i < numpages; i++) {
+		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+	}
 	kasan_enable_current();
 }
 

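Note that this hunk uses page_kasan_tag_reset() rather than kasan_reset_tag(): the tag being cleared lives in page->flags and is picked up whenever a virtual address is derived for the page (page_address(), kmap, ...), rather than in a pointer already in hand. A rough sketch of the distinction, with hypothetical names, an arm64-style top-byte tag, and an illustrative (not the kernel's) flags layout; the real helpers live in include/linux/mm.h and the arch headers:

#define TAG_SHIFT 56
#define TAG_ALL   0xffUL		/* match-all tag: never faults */

struct page_sketch { unsigned long flags; };

/* Like kasan_reset_tag(): untag a pointer you already hold. */
static inline void *reset_ptr_tag(void *ptr)
{
	return (void *)((unsigned long)ptr | (TAG_ALL << TAG_SHIFT));
}

/* Like page_kasan_tag_reset(): clear the tag stored with the page, so any
 * address later derived for it carries the match-all tag. */
static inline void reset_page_tag(struct page_sketch *page)
{
	page->flags |= TAG_ALL << TAG_SHIFT;
}
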
mm/page_poison.c

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ static void poison_page(struct page *page)
 
 	/* KASAN still think the page is in-use, so skip it. */
 	kasan_disable_current();
-	memset(addr, PAGE_POISON, PAGE_SIZE);
+	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
 	kasan_enable_current();
 	kunmap_atomic(addr);
 }

mm/slub.c

Lines changed: 16 additions & 13 deletions
@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	/*
-	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
 	 * Normally, this doesn't cause any issues, as both set_freepointer()
 	 * and get_freepointer() are called with a pointer with the same tag.
 	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
@@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
 
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
+	object = kasan_reset_tag(object);
 	return freelist_dereference(s, object + s->offset);
 }
 
@@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	BUG_ON(object == fp); /* naive detection of double free or corruption */
 #endif
 
+	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
 	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 }
 
@@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
 			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
-			length, 1);
+	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
+			16, 1, addr, length, 1);
 	metadata_access_disable();
 }
 
@@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 
 	p = object + get_info_end(s);
 
-	return p + alloc;
+	return kasan_reset_tag(p + alloc);
 }
 
 static void set_track(struct kmem_cache *s, void *object,
@@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object,
 		unsigned int nr_entries;
 
 		metadata_access_enable();
-		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
+		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+					      TRACK_ADDRS_COUNT, 3);
 		metadata_access_disable();
 
 		if (nr_entries < TRACK_ADDRS_COUNT)
@@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
-	u8 *p = object;
+	u8 *p = kasan_reset_tag(object);
 
 	if (s->flags & SLAB_RED_ZONE)
 		memset(p - s->red_left_pad, val, s->red_left_pad);
@@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 	u8 *addr = page_address(page);
 
 	metadata_access_enable();
-	fault = memchr_inv(start, value, bytes);
+	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 
 	pad = end - remainder;
 	metadata_access_enable();
-	fault = memchr_inv(pad, POISON_INUSE, remainder);
+	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, page_size(page));
+	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 			 * Clear the object and the metadata, but don't touch
 			 * the redzone.
 			 */
-			memset(object, 0, s->object_size);
+			memset(kasan_reset_tag(object), 0, s->object_size);
 			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
 							   : 0;
-			memset((char *)object + s->inuse, 0,
+			memset((char *)kasan_reset_tag(object) + s->inuse, 0,
 			       s->size - s->inuse - rsize);
 
 		}
@@ -2881,10 +2884,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		stat(s, ALLOC_FASTPATH);
 	}
 
-	maybe_wipe_obj_freeptr(s, object);
+	maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
 
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-		memset(object, 0, s->object_size);
+		memset(kasan_reset_tag(object), 0, s->object_size);
 
 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
