Skip to content

Commit 1f6b83e

Browse files
author
Martin Schwidefsky
committed
s390: avoid z13 cache aliasing
Avoid cache aliasing on z13 by aligning shared objects to multiples of 512K. The virtual addresses of a page from a shared file need to have identical bits in the range 2^12 to 2^18. Signed-off-by: Martin Schwidefsky <[email protected]>
1 parent f8b2dcb commit 1f6b83e

File tree

5 files changed

+155
-18
lines changed

5 files changed

+155
-18
lines changed

arch/s390/include/asm/elf.h

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
163163
the loader. We need to make sure that it is out of the way of the program
164164
that it will "exec", and that there is sufficient room for the brk. */
165165

166-
extern unsigned long randomize_et_dyn(unsigned long base);
167-
#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
166+
extern unsigned long randomize_et_dyn(void);
167+
#define ELF_ET_DYN_BASE randomize_et_dyn()
168168

169169
/* This yields a mask that user programs can use to figure out what
170170
instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
209209
} while (0)
210210
#endif /* CONFIG_COMPAT */
211211

212-
#define STACK_RND_MASK 0x7ffUL
212+
extern unsigned long mmap_rnd_mask;
213+
214+
#define STACK_RND_MASK (mmap_rnd_mask)
213215

214216
#define ARCH_DLINFO \
215217
do { \

arch/s390/include/asm/pgtable.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1779,6 +1779,10 @@ extern int s390_enable_sie(void);
17791779
extern int s390_enable_skey(void);
17801780
extern void s390_reset_cmma(struct mm_struct *mm);
17811781

1782+
/* s390 has a private copy of get unmapped area to deal with cache synonyms */
1783+
#define HAVE_ARCH_UNMAPPED_AREA
1784+
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1785+
17821786
/*
17831787
* No page table caches to initialise
17841788
*/

arch/s390/kernel/process.c

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -243,13 +243,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
243243
ret = PAGE_ALIGN(mm->brk + brk_rnd());
244244
return (ret > mm->brk) ? ret : mm->brk;
245245
}
246-
247-
unsigned long randomize_et_dyn(unsigned long base)
248-
{
249-
unsigned long ret;
250-
251-
if (!(current->flags & PF_RANDOMIZE))
252-
return base;
253-
ret = PAGE_ALIGN(base + brk_rnd());
254-
return (ret > base) ? ret : base;
255-
}

arch/s390/mm/init.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
7171
break;
7272
case 0x2827: /* zEC12 */
7373
case 0x2828: /* zEC12 */
74-
default:
7574
order = 5;
7675
break;
76+
case 0x2964: /* z13 */
77+
default:
78+
order = 7;
79+
break;
7780
}
7881
/* Limit number of empty zero pages for small memory sizes */
79-
if (order > 2 && totalram_pages <= 16384)
80-
order = 2;
82+
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
83+
order--;
8184

8285
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
8386
if (!empty_zero_page)

arch/s390/mm/mmap.c

Lines changed: 140 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,12 @@
2828
#include <linux/module.h>
2929
#include <linux/random.h>
3030
#include <linux/compat.h>
31+
#include <linux/security.h>
3132
#include <asm/pgalloc.h>
3233

34+
unsigned long mmap_rnd_mask;
35+
unsigned long mmap_align_mask;
36+
3337
static unsigned long stack_maxrandom_size(void)
3438
{
3539
if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
6064
{
6165
if (!(current->flags & PF_RANDOMIZE))
6266
return 0;
63-
/* 8MB randomization for mmap_base */
64-
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
67+
if (is_32bit_task())
68+
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
69+
else
70+
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
6571
}
6672

6773
static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
8187
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
8288
}
8389

90+
unsigned long
91+
arch_get_unmapped_area(struct file *filp, unsigned long addr,
92+
unsigned long len, unsigned long pgoff, unsigned long flags)
93+
{
94+
struct mm_struct *mm = current->mm;
95+
struct vm_area_struct *vma;
96+
struct vm_unmapped_area_info info;
97+
int do_color_align;
98+
99+
if (len > TASK_SIZE - mmap_min_addr)
100+
return -ENOMEM;
101+
102+
if (flags & MAP_FIXED)
103+
return addr;
104+
105+
if (addr) {
106+
addr = PAGE_ALIGN(addr);
107+
vma = find_vma(mm, addr);
108+
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
109+
(!vma || addr + len <= vma->vm_start))
110+
return addr;
111+
}
112+
113+
do_color_align = 0;
114+
if (filp || (flags & MAP_SHARED))
115+
do_color_align = !is_32bit_task();
116+
117+
info.flags = 0;
118+
info.length = len;
119+
info.low_limit = mm->mmap_base;
120+
info.high_limit = TASK_SIZE;
121+
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
122+
info.align_offset = pgoff << PAGE_SHIFT;
123+
return vm_unmapped_area(&info);
124+
}
125+
126+
unsigned long
127+
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
128+
const unsigned long len, const unsigned long pgoff,
129+
const unsigned long flags)
130+
{
131+
struct vm_area_struct *vma;
132+
struct mm_struct *mm = current->mm;
133+
unsigned long addr = addr0;
134+
struct vm_unmapped_area_info info;
135+
int do_color_align;
136+
137+
/* requested length too big for entire address space */
138+
if (len > TASK_SIZE - mmap_min_addr)
139+
return -ENOMEM;
140+
141+
if (flags & MAP_FIXED)
142+
return addr;
143+
144+
/* requesting a specific address */
145+
if (addr) {
146+
addr = PAGE_ALIGN(addr);
147+
vma = find_vma(mm, addr);
148+
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
149+
(!vma || addr + len <= vma->vm_start))
150+
return addr;
151+
}
152+
153+
do_color_align = 0;
154+
if (filp || (flags & MAP_SHARED))
155+
do_color_align = !is_32bit_task();
156+
157+
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
158+
info.length = len;
159+
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
160+
info.high_limit = mm->mmap_base;
161+
info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
162+
info.align_offset = pgoff << PAGE_SHIFT;
163+
addr = vm_unmapped_area(&info);
164+
165+
/*
166+
* A failed mmap() very likely causes application failure,
167+
* so fall back to the bottom-up function here. This scenario
168+
* can happen with large stack limits and large mmap()
169+
* allocations.
170+
*/
171+
if (addr & ~PAGE_MASK) {
172+
VM_BUG_ON(addr != -ENOMEM);
173+
info.flags = 0;
174+
info.low_limit = TASK_UNMAPPED_BASE;
175+
info.high_limit = TASK_SIZE;
176+
addr = vm_unmapped_area(&info);
177+
}
178+
179+
return addr;
180+
}
181+
182+
unsigned long randomize_et_dyn(void)
183+
{
184+
unsigned long base;
185+
186+
base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
187+
return base + mmap_rnd();
188+
}
189+
84190
#ifndef CONFIG_64BIT
85191

86192
/*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
177283
}
178284
}
179285

286+
static int __init setup_mmap_rnd(void)
287+
{
288+
struct cpuid cpu_id;
289+
290+
get_cpu_id(&cpu_id);
291+
switch (cpu_id.machine) {
292+
case 0x9672:
293+
case 0x2064:
294+
case 0x2066:
295+
case 0x2084:
296+
case 0x2086:
297+
case 0x2094:
298+
case 0x2096:
299+
case 0x2097:
300+
case 0x2098:
301+
case 0x2817:
302+
case 0x2818:
303+
case 0x2827:
304+
case 0x2828:
305+
mmap_rnd_mask = 0x7ffUL;
306+
mmap_align_mask = 0UL;
307+
break;
308+
case 0x2964: /* z13 */
309+
default:
310+
mmap_rnd_mask = 0x3ff80UL;
311+
mmap_align_mask = 0x7fUL;
312+
break;
313+
}
314+
return 0;
315+
}
316+
early_initcall(setup_mmap_rnd);
317+
180318
#endif

0 commit comments

Comments
 (0)