Skip to content

Commit 12ca42c

Browse files
surenbaghdasaryan authored and akpm00 committed
alloc_tag: allocate percpu counters for module tags dynamically
When a module gets unloaded it checks whether any of its tags are still
in use and, if so, we keep the memory containing the module's allocation
tags alive until all tags are unused. However, the percpu counters
referenced by the tags are freed by free_module(). This will lead to UAF
if the memory allocated by a module is accessed after the module was
unloaded.

To fix this we allocate percpu counters for module allocation tags
dynamically and we keep them alive for tags which are still in use after
module unloading. This also removes the requirement of a larger
PERCPU_MODULE_RESERVE when memory allocation profiling is enabled,
because percpu memory for counters does not need to be reserved anymore.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 0db6f8d ("alloc_tag: load module tags into separate contiguous memory")
Signed-off-by: Suren Baghdasaryan <[email protected]>
Reported-by: David Wang <[email protected]>
Closes: https://lore.kernel.org/all/[email protected]/
Tested-by: David Wang <[email protected]>
Cc: Christoph Lameter (Ampere) <[email protected]>
Cc: Dennis Zhou <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: Pasha Tatashin <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 221fcbf commit 12ca42c

File tree

5 files changed

+88
-28
lines changed

5 files changed

+88
-28
lines changed

include/linux/alloc_tag.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,13 +104,25 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
104104

105105
#else /* ARCH_NEEDS_WEAK_PER_CPU */
106106

107+
#ifdef MODULE
108+
109+
#define DEFINE_ALLOC_TAG(_alloc_tag) \
110+
static struct alloc_tag _alloc_tag __used __aligned(8) \
111+
__section(ALLOC_TAG_SECTION_NAME) = { \
112+
.ct = CODE_TAG_INIT, \
113+
.counters = NULL };
114+
115+
#else /* MODULE */
116+
107117
#define DEFINE_ALLOC_TAG(_alloc_tag) \
108118
static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
109119
static struct alloc_tag _alloc_tag __used __aligned(8) \
110120
__section(ALLOC_TAG_SECTION_NAME) = { \
111121
.ct = CODE_TAG_INIT, \
112122
.counters = &_alloc_tag_cntr };
113123

124+
#endif /* MODULE */
125+
114126
#endif /* ARCH_NEEDS_WEAK_PER_CPU */
115127

116128
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,

include/linux/codetag.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -36,10 +36,10 @@ union codetag_ref {
3636
struct codetag_type_desc {
3737
const char *section;
3838
size_t tag_size;
39-
void (*module_load)(struct codetag_type *cttype,
40-
struct codetag_module *cmod);
41-
void (*module_unload)(struct codetag_type *cttype,
42-
struct codetag_module *cmod);
39+
void (*module_load)(struct module *mod,
40+
struct codetag *start, struct codetag *end);
41+
void (*module_unload)(struct module *mod,
42+
struct codetag *start, struct codetag *end);
4343
#ifdef CONFIG_MODULES
4444
void (*module_replaced)(struct module *mod, struct module *new_mod);
4545
bool (*needs_section_mem)(struct module *mod, unsigned long size);

include/linux/percpu.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,7 @@
1515

1616
/* enough to cover all DEFINE_PER_CPUs in modules */
1717
#ifdef CONFIG_MODULES
18-
#ifdef CONFIG_MEM_ALLOC_PROFILING
19-
#define PERCPU_MODULE_RESERVE (8 << 13)
20-
#else
2118
#define PERCPU_MODULE_RESERVE (8 << 10)
22-
#endif
2319
#else
2420
#define PERCPU_MODULE_RESERVE 0
2521
#endif

lib/alloc_tag.c

Lines changed: 69 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size)
350350
return size >= sizeof(struct alloc_tag);
351351
}
352352

353-
static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
353+
static bool clean_unused_counters(struct alloc_tag *start_tag,
354+
struct alloc_tag *end_tag)
354355
{
355-
while (from <= to) {
356+
struct alloc_tag *tag;
357+
bool ret = true;
358+
359+
for (tag = start_tag; tag <= end_tag; tag++) {
356360
struct alloc_tag_counters counter;
357361

358-
counter = alloc_tag_read(from);
359-
if (counter.bytes)
360-
return from;
361-
from++;
362+
if (!tag->counters)
363+
continue;
364+
365+
counter = alloc_tag_read(tag);
366+
if (!counter.bytes) {
367+
free_percpu(tag->counters);
368+
tag->counters = NULL;
369+
} else {
370+
ret = false;
371+
}
362372
}
363373

364-
return NULL;
374+
return ret;
365375
}
366376

367377
/* Called with mod_area_mt locked */
@@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void)
371381
struct module *val;
372382

373383
mas_for_each(&mas, val, module_tags.size) {
384+
struct alloc_tag *start_tag;
385+
struct alloc_tag *end_tag;
386+
374387
if (val != &unloaded_mod)
375388
continue;
376389

377390
/* Release area if all tags are unused */
378-
if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
379-
(struct alloc_tag *)(module_tags.start_addr + mas.last)))
391+
start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
392+
end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
393+
if (clean_unused_counters(start_tag, end_tag))
380394
mas_erase(&mas);
381395
}
382396
}
@@ -561,7 +575,8 @@ static void *reserve_module_tags(struct module *mod, unsigned long size,
561575
static void release_module_tags(struct module *mod, bool used)
562576
{
563577
MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
564-
struct alloc_tag *tag;
578+
struct alloc_tag *start_tag;
579+
struct alloc_tag *end_tag;
565580
struct module *val;
566581

567582
mas_lock(&mas);
@@ -575,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used)
575590
if (!used)
576591
goto release_area;
577592

578-
/* Find out if the area is used */
579-
tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
580-
(struct alloc_tag *)(module_tags.start_addr + mas.last));
581-
if (tag) {
582-
struct alloc_tag_counters counter = alloc_tag_read(tag);
593+
start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index);
594+
end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last);
595+
if (!clean_unused_counters(start_tag, end_tag)) {
596+
struct alloc_tag *tag;
597+
598+
for (tag = start_tag; tag <= end_tag; tag++) {
599+
struct alloc_tag_counters counter;
600+
601+
if (!tag->counters)
602+
continue;
583603

584-
pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
585-
tag->ct.filename, tag->ct.lineno, tag->ct.modname,
586-
tag->ct.function, counter.bytes);
604+
counter = alloc_tag_read(tag);
605+
pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
606+
tag->ct.filename, tag->ct.lineno, tag->ct.modname,
607+
tag->ct.function, counter.bytes);
608+
}
587609
} else {
588610
used = false;
589611
}
@@ -596,6 +618,34 @@ static void release_module_tags(struct module *mod, bool used)
596618
mas_unlock(&mas);
597619
}
598620

621+
static void load_module(struct module *mod, struct codetag *start, struct codetag *stop)
622+
{
623+
/* Allocate module alloc_tag percpu counters */
624+
struct alloc_tag *start_tag;
625+
struct alloc_tag *stop_tag;
626+
struct alloc_tag *tag;
627+
628+
if (!mod)
629+
return;
630+
631+
start_tag = ct_to_alloc_tag(start);
632+
stop_tag = ct_to_alloc_tag(stop);
633+
for (tag = start_tag; tag < stop_tag; tag++) {
634+
WARN_ON(tag->counters);
635+
tag->counters = alloc_percpu(struct alloc_tag_counters);
636+
if (!tag->counters) {
637+
while (--tag >= start_tag) {
638+
free_percpu(tag->counters);
639+
tag->counters = NULL;
640+
}
641+
shutdown_mem_profiling(true);
642+
pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n",
643+
mod->name);
644+
break;
645+
}
646+
}
647+
}
648+
599649
static void replace_module(struct module *mod, struct module *new_mod)
600650
{
601651
MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
@@ -757,6 +807,7 @@ static int __init alloc_tag_init(void)
757807
.needs_section_mem = needs_section_mem,
758808
.alloc_section_mem = reserve_module_tags,
759809
.free_section_mem = release_module_tags,
810+
.module_load = load_module,
760811
.module_replaced = replace_module,
761812
#endif
762813
};

lib/codetag.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
194194
if (err >= 0) {
195195
cttype->count += range_size(cttype, &range);
196196
if (cttype->desc.module_load)
197-
cttype->desc.module_load(cttype, cmod);
197+
cttype->desc.module_load(mod, range.start, range.stop);
198198
}
199199
up_write(&cttype->mod_lock);
200200

@@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod)
333333
}
334334
if (found) {
335335
if (cttype->desc.module_unload)
336-
cttype->desc.module_unload(cttype, cmod);
336+
cttype->desc.module_unload(cmod->mod,
337+
cmod->range.start, cmod->range.stop);
337338

338339
cttype->count -= range_size(cttype, &cmod->range);
339340
idr_remove(&cttype->mod_idr, mod_id);

0 commit comments

Comments (0)