Commit c7e8b2c

Author: Martin Schwidefsky (committed)
s390: avoid cache aliasing under z/VM and KVM

commit 1f6b83e ("s390: avoid z13 cache aliasing") checks for the machine type to optimize address space randomization and zero page allocation to avoid cache aliases.

This check might fail under a hypervisor with migration support. z/VM's "Single System Image and Live Guest Relocation" facility will "fake" the machine type of the oldest system in the group. For example, in a group of zEC12 and z13 machines the guest appears to run on a zEC12 (architecture fencing within the relocation domain).

Remove the machine type detection and always use cache aliasing rules that are known to work for all machines. These are the z13 aliasing rules.

Suggested-by: Christian Borntraeger <[email protected]>
Reviewed-by: Heiko Carstens <[email protected]>
Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent f07f21b commit c7e8b2c

File tree

4 files changed: +23 −86 lines


arch/s390/include/asm/elf.h

Lines changed: 10 additions & 3 deletions
@@ -206,9 +206,16 @@ do { \
 } while (0)
 #endif /* CONFIG_COMPAT */
 
-extern unsigned long mmap_rnd_mask;
-
-#define STACK_RND_MASK	(test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
+/*
+ * Cache aliasing on the latest machines calls for a mapping granularity
+ * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
+ * of up to 1GB. For 31-bit processes the virtual address space is limited,
+ * use no alignment and limit the randomization to 8MB.
+ */
+#define BRK_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
+#define MMAP_ALIGN_MASK	(is_32bit_task() ? 0 : 0x7fUL)
+#define STACK_RND_MASK	MMAP_RND_MASK
 
 #define ARCH_DLINFO \
 do { \
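The comment above the new masks states the intended sizes; for readers who want to check the arithmetic, here is a small standalone userspace sketch (not kernel code) that expands the 64-bit values into bytes. It assumes the usual s390 PAGE_SHIFT of 12 (4KB pages) and drops the is_32bit_task() selector, showing only the 64-bit case.

/*
 * Hedged sketch: expand the new 64-bit masks from elf.h into byte sizes,
 * assuming 4KB pages.  Standalone program, not kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define BRK_RND_MASK	0x3ffffUL	/* 64-bit value from elf.h */
#define MMAP_RND_MASK	0x3ff80UL	/* 64-bit value from elf.h */
#define MMAP_ALIGN_MASK	0x7fUL		/* 64-bit value from elf.h */

int main(void)
{
	/* brk: up to (mask + 1) pages of randomization -> 1GB */
	printf("brk randomization:  %lu MB\n",
	       ((BRK_RND_MASK + 1) << PAGE_SHIFT) >> 20);
	/* mmap: the low 7 bits of the mask are clear, so the random offset
	 * is always a multiple of 128 pages = 512KB, spanning up to ~1GB */
	printf("mmap randomization: %lu MB in %lu KB steps\n",
	       ((MMAP_RND_MASK + 0x80) << PAGE_SHIFT) >> 20,
	       (0x80UL << PAGE_SHIFT) >> 10);
	/* mapping alignment: (mask + 1) pages -> 512KB granularity */
	printf("mmap alignment:     %lu KB\n",
	       ((MMAP_ALIGN_MASK + 1) << PAGE_SHIFT) >> 10);
	return 0;
}

For 31-bit tasks all three masks collapse to 0x7ff or 0, which corresponds to the 8MB randomization and unaligned placement mentioned in the comment.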

arch/s390/kernel/process.c

Lines changed: 1 addition & 5 deletions
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
 
 static inline unsigned long brk_rnd(void)
 {
-	/* 8MB for 32bit, 1GB for 64bit */
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-	else
-		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
 }
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)

arch/s390/mm/init.c

Lines changed: 3 additions & 27 deletions
@@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
-	struct cpuid cpu_id;
 	unsigned int order;
 	struct page *page;
 	int i;
 
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:	/* g5 */
-	case 0x2064:	/* z900 */
-	case 0x2066:	/* z900 */
-	case 0x2084:	/* z990 */
-	case 0x2086:	/* z990 */
-	case 0x2094:	/* z9-109 */
-	case 0x2096:	/* z9-109 */
-		order = 0;
-		break;
-	case 0x2097:	/* z10 */
-	case 0x2098:	/* z10 */
-	case 0x2817:	/* z196 */
-	case 0x2818:	/* z196 */
-		order = 2;
-		break;
-	case 0x2827:	/* zEC12 */
-	case 0x2828:	/* zEC12 */
-		order = 5;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		order = 7;
-		break;
-	}
+	/* Latest machines require a mapping granularity of 512KB */
+	order = 7;
+
 	/* Limit number of empty zero pages for small memory sizes */
 	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
 		order--;
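To see what the simplified sizing does in practice, here is a hedged standalone sketch of the same loop, with totalram_pages passed in as a parameter instead of read from the kernel; the memory sizes in main() are illustrative only and assume 4KB pages.

/*
 * Sketch of the zero-page sizing above: start at order 7
 * (2^7 pages = 512KB of zero pages) and shrink on small systems.
 */
#include <stdio.h>

static unsigned int zero_page_order(unsigned long totalram_pages)
{
	unsigned int order = 7;	/* 128 pages = 512KB */

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;
	return order;
}

int main(void)
{
	/* 256MB of 4KB pages = 65536 pages -> order drops to 6 */
	printf("order for 256MB: %u\n", zero_page_order(65536));
	/* 4GB of 4KB pages = 1048576 pages -> order stays at 7 */
	printf("order for 4GB:   %u\n", zero_page_order(1048576));
	return 0;
}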

arch/s390/mm/mmap.c

Lines changed: 9 additions & 51 deletions
@@ -31,9 +31,6 @@
 #include <linux/security.h>
 #include <asm/pgalloc.h>
 
-unsigned long mmap_rnd_mask;
-static unsigned long mmap_align_mask;
-
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
 
 unsigned long arch_mmap_rnd(void)
 {
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
-	else
-		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
+	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	return vm_unmapped_area(&info);
 }
@@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);
 
@@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
 	}
 }
-
-static int __init setup_mmap_rnd(void)
-{
-	struct cpuid cpu_id;
-
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:
-	case 0x2064:
-	case 0x2066:
-	case 0x2084:
-	case 0x2086:
-	case 0x2094:
-	case 0x2096:
-	case 0x2097:
-	case 0x2098:
-	case 0x2817:
-	case 0x2818:
-	case 0x2827:
-	case 0x2828:
-		mmap_rnd_mask = 0x7ffUL;
-		mmap_align_mask = 0UL;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		mmap_rnd_mask = 0x3ff80UL;
-		mmap_align_mask = 0x7fUL;
-		break;
-	}
-	return 0;
-}
-early_initcall(setup_mmap_rnd);
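The align_mask/align_offset pair handed to vm_unmapped_area() asks that, for file-backed or shared mappings, the low bits of the chosen virtual page number match the low bits of the file page offset, so every mapping of a given page lands on the same 512KB cache color. The standalone sketch below illustrates that invariant with a hypothetical color_address() helper; it is not the kernel's placement code, and the addresses in main() are made up for illustration.

/*
 * Hedged sketch of the coloring invariant: replace the color bits
 * (virtual page number bits covered by MMAP_ALIGN_MASK) of a candidate
 * address with the color derived from the file page offset.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define MMAP_ALIGN_MASK	0x7fUL	/* 128 pages = 512KB worth of colors */

static unsigned long color_address(unsigned long addr, unsigned long pgoff)
{
	unsigned long align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	unsigned long align_offset = (pgoff << PAGE_SHIFT) & align_mask;

	return (addr & ~align_mask) | align_offset;
}

int main(void)
{
	unsigned long addr = 0x3ff12345000UL;	/* illustrative candidate */
	unsigned long pgoff = 0x42;		/* illustrative file offset */

	printf("candidate 0x%lx, pgoff 0x%lx -> colored 0x%lx\n",
	       addr, pgoff, color_address(addr, pgoff));
	return 0;
}

Anonymous private mappings get align_mask = 0, as in the diff above, since they cannot alias with another mapping of the same data.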
