
Commit 7f94ffb

xairy authored and torvalds committed
kasan: add hooks implementation for tag-based mode
This commit adds the tag-based KASAN specific hooks implementation and adjusts the common generic and tag-based KASAN ones.

1. When a new slab cache is created, tag-based KASAN rounds up the size of the objects in this cache to KASAN_SHADOW_SCALE_SIZE (== 16).

2. On each kmalloc, tag-based KASAN generates a random tag, sets the shadow memory that corresponds to this object to this tag, and embeds this tag value into the top byte of the returned pointer.

3. On each kfree, tag-based KASAN poisons the shadow memory with a random tag to allow detection of use-after-free bugs.

The rest of the logic of the hook implementation is very similar to that of generic KASAN. Tag-based KASAN saves allocation and free stack metadata to the slab object the same way generic KASAN does.

Link: http://lkml.kernel.org/r/bda78069e3b8422039794050ddcb2d53d053ed41.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Andrey Ryabinin <[email protected]>
Reviewed-by: Dmitry Vyukov <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
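The tag-embedding scheme described in point 2 can be illustrated with a small standalone sketch (not part of this commit, assuming a 64-bit address space): a software tag lives in the otherwise unused top byte of a pointer, and the set_tag()/get_tag()/reset_tag() helpers used throughout the diff below rely on the same bit manipulation. The demo_* names here are hypothetical.

/*
 * Illustration only, not kernel code: storing a tag in the top byte of a
 * 64-bit pointer and stripping it again. The demo_* helpers are hypothetical
 * stand-ins for the kernel's set_tag()/get_tag()/reset_tag().
 */
#include <stdint.h>

#define DEMO_TAG_SHIFT	56
#define DEMO_TAG_MASK	(0xffULL << DEMO_TAG_SHIFT)
#define DEMO_TAG_KERNEL	0xff	/* "native" kernel tag; such accesses are skipped by the checks */

static inline void *demo_set_tag(const void *addr, uint8_t tag)
{
	/* Clear the current top byte, then write the new tag into it. */
	return (void *)(((uintptr_t)addr & ~DEMO_TAG_MASK) |
			((uintptr_t)tag << DEMO_TAG_SHIFT));
}

static inline uint8_t demo_get_tag(const void *addr)
{
	return (uint8_t)((uintptr_t)addr >> DEMO_TAG_SHIFT);
}

static inline void *demo_reset_tag(const void *addr)
{
	return demo_set_tag(addr, DEMO_TAG_KERNEL);
}

Because arm64's Top Byte Ignore feature lets the CPU dereference such tagged pointers unmodified, embedding the tag costs nothing on the access path; only the KASAN checks interpret it.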
1 parent 5b7c414 · commit 7f94ffb

3 files changed: +153 −19 lines


mm/kasan/common.c

Lines changed: 97 additions & 19 deletions
@@ -140,6 +140,13 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
 
@@ -148,11 +155,24 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-	kasan_poison_shadow(address, size, 0);
+	u8 tag = get_tag(address);
+
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
+	kasan_poison_shadow(address, size, tag);
 
 	if (size & KASAN_SHADOW_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-		*shadow = size & KASAN_SHADOW_MASK;
+
+		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+			*shadow = tag;
+		else
+			*shadow = size & KASAN_SHADOW_MASK;
 	}
 }
 
@@ -200,8 +220,9 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-	if (likely(!PageHighMem(page)))
-		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+	if (unlikely(PageHighMem(page)))
+		return;
+	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
@@ -218,6 +239,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		return 0;
+
 	return
 		object_size <= 64 - 16 ? 16 :
 		object_size <= 128 - 32 ? 32 :
@@ -232,27 +256,28 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
 	unsigned int orig_size = *size;
+	unsigned int redzone_size;
 	int redzone_adjust;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
 
 	/* Add free meta. */
-	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+	     cache->object_size < sizeof(struct kasan_free_meta))) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
 	}
-	redzone_adjust = optimal_redzone(cache->object_size) -
-		(*size - cache->object_size);
 
+	redzone_size = optimal_redzone(cache->object_size);
+	redzone_adjust = redzone_size - (*size - cache->object_size);
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
 	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-		      max(*size, cache->object_size +
-			  optimal_redzone(cache->object_size)));
+			max(*size, cache->object_size + redzone_size));
 
 	/*
 	 * If the metadata doesn't fit, don't enable KASAN at all.
@@ -265,6 +290,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 	}
 
+	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
 	*flags |= SLAB_KASAN;
 }
 
@@ -309,6 +336,32 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For SLAB allocator we can't preassign tags randomly since the freelist is
+ * stored as an array of indexes instead of a linked list. Assign tags based
+ * on objects indexes, so that objects that are next to each other get
+ * different tags.
+ * After a tag is assigned, the object always gets allocated with the same tag.
+ * The reason is that we can't change tags for objects with constructors on
+ * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
+ * code can save the pointer to the object somewhere (e.g. in the object
+ * itself). Then if we retag it, the old saved pointer will become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+		return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
+	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+	return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
 	struct kasan_alloc_meta *alloc_info;
@@ -319,6 +372,9 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 	alloc_info = get_alloc_info(cache, object);
 	__memset(alloc_info, 0, sizeof(*alloc_info));
 
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		object = set_tag(object, assign_tag(cache, object, true));
+
 	return (void *)object;
 }
 
@@ -327,15 +383,30 @@ void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return shadow_byte < 0 ||
+			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+	else
+		return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 			      unsigned long ip, bool quarantine)
 {
 	s8 shadow_byte;
+	u8 tag;
+	void *tagged_object;
 	unsigned long rounded_up_size;
 
+	tag = get_tag(object);
+	tagged_object = object;
+	object = reset_tag(object);
+
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
-		kasan_report_invalid_free(object, ip);
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
@@ -344,20 +415,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return false;
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_invalid_free(object, ip);
+	if (shadow_invalid(tag, shadow_byte)) {
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
 	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+			unlikely(!(cache->flags & SLAB_KASAN)))
 		return false;
 
 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
 	quarantine_put(get_free_info(cache, object), cache);
-	return true;
+
+	return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
@@ -370,6 +443,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
+	u8 tag;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
@@ -382,14 +456,18 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-	kasan_unpoison_shadow(object, size);
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		tag = assign_tag(cache, object, false);
+
+	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
+	kasan_unpoison_shadow(set_tag(object, tag), size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 
 	if (cache->flags & SLAB_KASAN)
 		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-	return (void *)object;
+	return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
@@ -439,7 +517,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
+		if (reset_tag(ptr) != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -452,7 +530,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (ptr != page_address(virt_to_head_page(ptr)))
+	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }

mm/kasan/kasan.h

Lines changed: 8 additions & 0 deletions
@@ -12,10 +12,18 @@
 #define KASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
 #define KASAN_TAG_MAX		0xFD /* maximum value for random tags */
 
+#ifdef CONFIG_KASAN_GENERIC
 #define KASAN_FREE_PAGE		0xFF /* page was freed */
 #define KASAN_PAGE_REDZONE	0xFE /* redzone for kmalloc_large allocations */
 #define KASAN_KMALLOC_REDZONE	0xFC /* redzone inside slub object */
 #define KASAN_KMALLOC_FREE	0xFB /* object was freed (kmem_cache_free/kfree) */
+#else
+#define KASAN_FREE_PAGE		KASAN_TAG_INVALID
+#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
+#define KASAN_KMALLOC_REDZONE	KASAN_TAG_INVALID
+#define KASAN_KMALLOC_FREE	KASAN_TAG_INVALID
+#endif
+
 #define KASAN_GLOBAL_REDZONE	0xFA /* redzone for global variable */
 
 /*

mm/kasan/tags.c

Lines changed: 48 additions & 0 deletions
@@ -78,15 +78,60 @@ void *kasan_reset_tag(const void *addr)
 void check_memory_region(unsigned long addr, size_t size, bool write,
 				unsigned long ret_ip)
 {
+	u8 tag;
+	u8 *shadow_first, *shadow_last, *shadow;
+	void *untagged_addr;
+
+	if (unlikely(size == 0))
+		return;
+
+	tag = get_tag((const void *)addr);
+
+	/*
+	 * Ignore accesses for pointers tagged with 0xff (native kernel
+	 * pointer tag) to suppress false positives caused by kmap.
+	 *
+	 * Some kernel code was written to account for archs that don't keep
+	 * high memory mapped all the time, but rather map and unmap particular
+	 * pages when needed. Instead of storing a pointer to the kernel memory,
+	 * this code saves the address of the page structure and offset within
+	 * that page for later use. Those pages are then mapped and unmapped
+	 * with kmap/kunmap when necessary and virt_to_page is used to get the
+	 * virtual address of the page. For arm64 (that keeps the high memory
+	 * mapped all the time), kmap is turned into a page_address call.
+
+	 * The issue is that with use of the page_address + virt_to_page
+	 * sequence the top byte value of the original pointer gets lost (gets
+	 * set to KASAN_TAG_KERNEL (0xFF)).
+	 */
+	if (tag == KASAN_TAG_KERNEL)
+		return;
+
+	untagged_addr = reset_tag((const void *)addr);
+	if (unlikely(untagged_addr <
+			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+		kasan_report(addr, size, write, ret_ip);
+		return;
+	}
+	shadow_first = kasan_mem_to_shadow(untagged_addr);
+	shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
+	for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
+		if (*shadow != tag) {
+			kasan_report(addr, size, write, ret_ip);
+			return;
+		}
+	}
 }
 
 #define DEFINE_HWASAN_LOAD_STORE(size)					\
 	void __hwasan_load##size##_noabort(unsigned long addr)		\
 	{								\
+		check_memory_region(addr, size, false, _RET_IP_);	\
 	}								\
 	EXPORT_SYMBOL(__hwasan_load##size##_noabort);			\
 	void __hwasan_store##size##_noabort(unsigned long addr)	\
 	{								\
+		check_memory_region(addr, size, true, _RET_IP_);	\
 	}								\
 	EXPORT_SYMBOL(__hwasan_store##size##_noabort)
 
@@ -98,15 +143,18 @@ DEFINE_HWASAN_LOAD_STORE(16);
 
 void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
 {
+	check_memory_region(addr, size, false, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_loadN_noabort);
 
 void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
 {
+	check_memory_region(addr, size, true, _RET_IP_);
 }
 EXPORT_SYMBOL(__hwasan_storeN_noabort);
 
 void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
 {
+	kasan_poison_shadow((void *)addr, size, tag);
}
 EXPORT_SYMBOL(__hwasan_tag_memory);
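As a usage-level illustration of the check_memory_region() logic added above, here is a minimal userspace model (a sketch, not kernel code): one shadow byte covers a 16-byte granule, allocation writes the pointer's tag into every covered shadow byte, freeing re-poisons them with a different tag, and a later access through the stale tagged pointer is detected as a mismatch. The demo_* names and the flat shadow array are assumptions made for this sketch.

/*
 * Simplified userspace model of the tag-based shadow check (illustration
 * only, assuming a 64-bit address space). One shadow byte per 16-byte
 * granule; an access is valid only if the pointer's top-byte tag matches
 * every covered shadow byte.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GRANULE		16
#define POOL_BYTES	256

static uint8_t pool[POOL_BYTES];
static uint8_t shadow[POOL_BYTES / GRANULE];

/* "Allocate": tag the granules covering [off, off + len) and return a tagged pointer. */
static uintptr_t demo_alloc(size_t off, size_t len, uint8_t tag)
{
	memset(&shadow[off / GRANULE], tag, (len + GRANULE - 1) / GRANULE);
	return ((uintptr_t)tag << 56) | (uintptr_t)&pool[off];
}

/* Check an access of 'size' bytes through a tagged pointer; 1 = ok, 0 = report a bug. */
static int demo_check(uintptr_t tagged, size_t size)
{
	uint8_t tag = (uint8_t)(tagged >> 56);
	uint8_t *addr = (uint8_t *)(tagged & ((1ULL << 56) - 1));
	size_t first = (size_t)(addr - pool) / GRANULE;
	size_t last = (size_t)(addr - pool + size - 1) / GRANULE;

	for (size_t i = first; i <= last; i++)
		if (shadow[i] != tag)
			return 0;	/* tag mismatch: use-after-free or out-of-bounds */
	return 1;
}

int main(void)
{
	uintptr_t p = demo_alloc(0, 32, 0xab);

	printf("valid access:   %d\n", demo_check(p, 32));	/* prints 1 */
	demo_alloc(0, 32, 0xcd);	/* "free": re-poison the granules with a new tag */
	printf("use after free: %d\n", demo_check(p, 32));	/* prints 0 */
	return 0;
}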
