
Commit e06df6a

Merge branch 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 kaslr update from Ingo Molnar:
 "This adds kernel module load address randomization"

* 'x86-kaslr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kaslr: fix module lock ordering problem
  x86, kaslr: randomize module base load address
2 parents c0fc3cb + 9dd721c

2 files changed, 45 insertions(+), 5 deletions(-)

Documentation/kernel-parameters.txt

Lines changed: 2 additions & 2 deletions
@@ -2060,8 +2060,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			IOAPICs that may be present in the system.
 
 	nokaslr		[X86]
-			Disable kernel base offset ASLR (Address Space
-			Layout Randomization) if built into the kernel.
+			Disable kernel and module base offset ASLR (Address
+			Space Layout Randomization) if built into the kernel.
 
 	noautogroup	Disable scheduler automatic task group creation.
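For context, a hypothetical usage note (the boot-loader entry and device paths below are assumptions, not part of this commit): with this change, passing nokaslr on the kernel command line disables both kernel base and module base randomization.

	# Hypothetical GRUB-style kernel command line with the parameter appended.
	linux /boot/vmlinuz root=/dev/sda1 ro nokaslr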

arch/x86/kernel/module.c

Lines changed: 43 additions & 3 deletions
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -43,13 +44,52 @@ do { \
 } while (0)
 #endif
 
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+	randomize_modules = 0;
+	return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+	if (randomize_modules) {
+		mutex_lock(&module_kaslr_mutex);
+		/*
+		 * Calculate the module_load_offset the first time this
+		 * code is called. Once calculated it stays the same until
+		 * reboot.
+		 */
+		if (module_load_offset == 0)
+			module_load_offset =
+				(get_random_int() % 1024 + 1) * PAGE_SIZE;
+		mutex_unlock(&module_kaslr_mutex);
+	}
+	return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+	return 0;
+}
+#endif
+
 void *module_alloc(unsigned long size)
 {
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-			NUMA_NO_NODE, __builtin_return_address(0));
+	return __vmalloc_node_range(size, 1,
+				    MODULES_VADDR + get_module_load_offset(),
+				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+				    PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+				    __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32
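As an aside, here is a minimal standalone C sketch of the offset arithmetic used in get_module_load_offset() above. It is an illustration only, with rand() standing in for the kernel's get_random_int() and an assumed 4 KiB PAGE_SIZE, so the randomized shift applied to the module area falls between 1 page (4 KiB) and 1024 pages (4 MiB):

/* Userspace illustration only, not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE 4096UL	/* assumed x86 page size */

int main(void)
{
	srand((unsigned int)time(NULL));

	/* Same arithmetic as the kernel hunk: 1..1024 pages. */
	unsigned long offset = ((unsigned long)rand() % 1024 + 1) * PAGE_SIZE;

	printf("module_alloc() base would be shifted by %lu bytes\n", offset);
	printf("possible range: %lu .. %lu bytes\n", PAGE_SIZE, 1024 * PAGE_SIZE);
	return 0;
}

With randomize_modules cleared by the nokaslr parameter, get_module_load_offset() leaves the offset at 0 and module_alloc() behaves exactly as before the patch.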
