
Commit 2813b9c

xairy authored and torvalds committed
kasan, mm, arm64: tag non slab memory allocated via pagealloc
Tag-based KASAN doesn't check memory accesses through pointers tagged with 0xff. When page_address is used to get a pointer to memory that corresponds to some page, the tag of the resulting pointer gets set to 0xff, even though the allocated memory might have been tagged differently.

For slab pages it's impossible to recover the correct tag to return from page_address, since the page might contain multiple slab objects tagged with different values, and we can't know in advance which one of them is going to get accessed. For non-slab pages, however, we can recover the tag in page_address, since the whole page was marked with the same tag.

This patch adds tagging to non-slab memory allocated with pagealloc. To set the tag of the pointer returned from page_address, the tag gets stored to page->flags when the memory gets allocated.

Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Reviewed-by: Andrey Ryabinin <[email protected]>
Reviewed-by: Dmitry Vyukov <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Mark Rutland <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 41eea9c commit 2813b9c
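
Before the per-file diffs, a minimal userspace sketch of the round trip the commit message describes: the tag is stored in spare high bits of the page's flags word at allocation time and later placed in the pointer's top byte when the address is derived from the page. This is illustrative C only, not kernel code; struct fake_page, tag_set(), tag_get(), addr_tag_set(), the TAG_SHIFT value of 48, and the example address and tag are all made-up stand-ins for page->flags, page_kasan_tag_set(), page_kasan_tag(), and arm64's __tag_set().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_WIDTH  8
#define TAG_SHIFT  48  /* illustrative stand-in for KASAN_TAG_PGSHIFT */
#define TAG_MASK   ((1ULL << TAG_WIDTH) - 1)

struct fake_page { uint64_t flags; };  /* stand-in for struct page */

/* Models page_kasan_tag_set(): clear the tag field, then store the new tag. */
static void tag_set(struct fake_page *page, uint8_t tag)
{
	page->flags &= ~(TAG_MASK << TAG_SHIFT);
	page->flags |= ((uint64_t)tag & TAG_MASK) << TAG_SHIFT;
}

/* Models page_kasan_tag(): extract the tag field from the flags word. */
static uint8_t tag_get(const struct fake_page *page)
{
	return (page->flags >> TAG_SHIFT) & TAG_MASK;
}

/* Models arm64's __tag_set(): place the tag in bits 63:56 of the address. */
static uint64_t addr_tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
}

int main(void)
{
	struct fake_page page = { .flags = 0 };
	uint64_t addr = 0xffff000012345000ULL;     /* made-up linear address */

	tag_set(&page, 0x2a);                      /* at allocation time */
	addr = addr_tag_set(addr, tag_get(&page)); /* in page_to_virt() */

	assert(((addr >> 56) & 0xff) == 0x2a);     /* tag survived the round trip */
	printf("tagged address: 0x%llx\n", (unsigned long long)addr);
	return 0;
}

The design point mirrored here is that the tag lives in otherwise-unused high bits of both the flags word and the virtual address, so no extra per-page storage is needed.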

File tree: 7 files changed, +72 −4 lines changed

arch/arm64/include/asm/memory.h

Lines changed: 7 additions & 1 deletion
@@ -321,7 +321,13 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

-#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page)	({					\
+	unsigned long __addr =						\
+		((__page_to_voff(page)) | PAGE_OFFSET);			\
+	__addr = __tag_set(__addr, page_kasan_tag(page));		\
+	((void *)__addr);						\
+})
+
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

 #define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \

include/linux/mm.h

Lines changed: 29 additions & 0 deletions
@@ -804,6 +804,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 #define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

 /*
  * Define the bit shifts to access each section. For non-existent
@@ -814,6 +815,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 #define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -836,6 +838,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

 static inline enum zone_type page_zonenum(const struct page *page)
@@ -1101,6 +1104,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 }
 #endif /* CONFIG_NUMA_BALANCING */

+#ifdef CONFIG_KASAN_SW_TAGS
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag)
+{
+	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+}
+
+static inline void page_kasan_tag_reset(struct page *page)
+{
+	page_kasan_tag_set(page, 0xff);
+}
+#else
+static inline u8 page_kasan_tag(const struct page *page)
+{
+	return 0xff;
+}
+
+static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
+static inline void page_kasan_tag_reset(struct page *page) { }
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];

include/linux/page-flags-layout.h

Lines changed: 10 additions & 0 deletions
@@ -82,6 +82,16 @@
 #define LAST_CPUPID_WIDTH 0
 #endif

+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+	> BITS_PER_LONG - NR_PAGEFLAGS
+#error "KASAN: not enough bits in page flags for tag"
+#endif
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
 /*
  * We are going to use the flags for the page to node mapping if its in
 * there. This includes the case where there is no node, so it is implicit.
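
A short worked example of what the #if above guards: the 8 tag bits must fit in the page->flags bits left over after the page flags proper and the other packed fields. The widths below are assumptions chosen for illustration, not the values of any particular kernel config.

/* Assumed example values only; the real ones are config-dependent. */
#define BITS_PER_LONG     64
#define NR_PAGEFLAGS      24  /* assumption */
#define SECTIONS_WIDTH     0  /* assumption */
#define NODES_WIDTH       10  /* assumption */
#define ZONES_WIDTH        2  /* assumption */
#define LAST_CPUPID_WIDTH  0  /* assumption */
#define KASAN_TAG_WIDTH    8

/* Same check as the patch: fails the build if the fields don't fit. */
#if SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + LAST_CPUPID_WIDTH + \
    KASAN_TAG_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error "KASAN: not enough bits in page flags for tag"
#endif

With these assumed numbers, 0 + 10 + 2 + 0 + 8 = 20 bits are used against 64 − 24 = 40 available, so the hypothetical configuration builds; a configuration where the sum exceeded the available bits would fail at compile time instead of silently corrupting neighbouring page->flags fields.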

mm/cma.c

Lines changed: 11 additions & 0 deletions
@@ -407,6 +407,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	unsigned long pfn = -1;
 	unsigned long start = 0;
 	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
+	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;

@@ -466,6 +467,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,

 	trace_cma_alloc(pfn, page, count, align);

+	/*
+	 * CMA can allocate multiple page blocks, which results in different
+	 * blocks being marked with different tags. Reset the tags to ignore
+	 * those page blocks.
+	 */
+	if (page) {
+		for (i = 0; i < count; i++)
+			page_kasan_tag_reset(page + i);
+	}
+
 	if (ret && !no_warn) {
 		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
 			__func__, count, ret);

mm/kasan/common.c

Lines changed: 13 additions & 2 deletions
@@ -220,8 +220,15 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)

 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
+	u8 tag;
+	unsigned long i;
+
 	if (unlikely(PageHighMem(page)))
 		return;
+
+	tag = random_tag();
+	for (i = 0; i < (1 << order); i++)
+		page_kasan_tag_set(page + i, tag);
 	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }

@@ -319,6 +326,10 @@ struct kasan_free_meta *get_free_info(struct kmem_cache *cache,

 void kasan_poison_slab(struct page *page)
 {
+	unsigned long i;
+
+	for (i = 0; i < (1 << compound_order(page)); i++)
+		page_kasan_tag_reset(page + i);
 	kasan_poison_shadow(page_address(page),
 			PAGE_SIZE << compound_order(page),
 			KASAN_KMALLOC_REDZONE);
@@ -517,7 +528,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);

 	if (unlikely(!PageSlab(page))) {
-		if (reset_tag(ptr) != page_address(page)) {
+		if (ptr != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -530,7 +541,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)

 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
+	if (ptr != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }

mm/page_alloc.c

Lines changed: 1 addition & 0 deletions
@@ -1183,6 +1183,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
+	page_kasan_tag_reset(page);

 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL

mm/slab.c

Lines changed: 1 addition & 1 deletion
@@ -2357,7 +2357,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
 	void *freelist;
 	void *addr = page_address(page);

-	page->s_mem = addr + colour_off;
+	page->s_mem = kasan_reset_tag(addr) + colour_off;
 	page->active = 0;

 	if (OBJFREELIST_SLAB(cachep))
