@@ -28,8 +28,12 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/compat.h>
+#include <linux/security.h>
 #include <asm/pgalloc.h>
 
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
 		return 0;
-	/* 8MB randomization for mmap_base */
-	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+	if (is_32bit_task())
+		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+	else
+		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(void)
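(With 4 KB pages, the 0x7ff mask kept for 31-bit tasks still yields the old 8 MB of mmap_base randomization; 64-bit tasks now draw from mmap_rnd_mask, which the early initcall at the end of this diff sets per machine type. Worked numbers follow the diff.)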
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
 	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
 }
 
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		       unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
+	int do_color_align;
+
+	/* requested length too big for entire address space */
+	if (len > TASK_SIZE - mmap_min_addr)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	/* requesting a specific address */
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = !is_32bit_task();
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+	unsigned long base;
+
+	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	return base + mmap_rnd();
+}
+
 #ifndef CONFIG_64BIT
 
 /*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	}
 }
 
+static int __init setup_mmap_rnd(void)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	switch (cpu_id.machine) {
+	case 0x9672:
+	case 0x2064:
+	case 0x2066:
+	case 0x2084:
+	case 0x2086:
+	case 0x2094:
+	case 0x2096:
+	case 0x2097:
+	case 0x2098:
+	case 0x2817:
+	case 0x2818:
+	case 0x2827:
+	case 0x2828:
+		mmap_rnd_mask = 0x7ffUL;
+		mmap_align_mask = 0UL;
+		break;
+	case 0x2964:	/* z13 */
+	default:
+		mmap_rnd_mask = 0x3ff80UL;
+		mmap_align_mask = 0x7fUL;
+		break;
+	}
+	return 0;
+}
+early_initcall(setup_mmap_rnd);
+
 #endif
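
The net effect of the new masks is easiest to see with concrete numbers. Below is a minimal userspace sketch, not part of the commit, that only replays the arithmetic above; it assumes 4 KB pages (PAGE_SHIFT = 12), and the helper and variable names are illustrative, not kernel interfaces.

/*
 * Illustration only: derives the randomization window, its step size and
 * the coloring granularity from the masks chosen by setup_mmap_rnd().
 * Assumes PAGE_SHIFT = 12 (4 KB pages), as on s390.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static void describe(const char *cpu, unsigned long rnd_mask,
		     unsigned long align_mask)
{
	/* Step between two possible mmap_base values: lowest set bit of the mask. */
	unsigned long step = (rnd_mask & -rnd_mask) << PAGE_SHIFT;
	/* Total span covered by the randomization, in bytes. */
	unsigned long span = (rnd_mask << PAGE_SHIFT) + step;
	/* Granularity that do_color_align enforces (page-aligned only when the mask is 0). */
	unsigned long align = (align_mask + 1) * PAGE_SIZE;

	printf("%-8s span %4lu MB, step %4lu KB, color alignment %4lu KB\n",
	       cpu, span >> 20, step >> 10, align >> 10);
}

int main(void)
{
	/* Values chosen by setup_mmap_rnd() in the patch. */
	describe("pre-z13", 0x7ffUL, 0UL);	/* 8 MB window in 4 KB steps */
	describe("z13", 0x3ff80UL, 0x7fUL);	/* ~1 GB window in 512 KB steps */
	return 0;
}

Run, this would show roughly: machines before z13 keep the old 8 MB randomization window with page granularity and no extra alignment, while z13 and later get about a 1 GB window in 512 KB steps, with shared and file-backed mappings colored to the same 512 KB granularity.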