
Commit 1eccfa0

Merge tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull usercopy protection from Kees Cook:
 "This implements HARDENED_USERCOPY verification of copy_to_user and
  copy_from_user bounds checking for most architectures on SLAB and
  SLUB."

* tag 'usercopy-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  mm: SLUB hardened usercopy support
  mm: SLAB hardened usercopy support
  s390/uaccess: Enable hardened usercopy
  sparc/uaccess: Enable hardened usercopy
  powerpc/uaccess: Enable hardened usercopy
  ia64/uaccess: Enable hardened usercopy
  arm64/uaccess: Enable hardened usercopy
  ARM: uaccess: Enable hardened usercopy
  x86/uaccess: Enable hardened usercopy
  mm: Hardened usercopy
  mm: Implement stack frame object validation
  mm: Add is_migrate_cma_page

2 parents 1bd4403 + ed18adc, commit 1eccfa0
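The per-architecture hunks below all funnel into check_object_size(). As a rough sketch of the wrapper that the "mm: Hardened usercopy" patch in this series introduces (reconstructed from memory, not the verbatim include/linux/thread_info.h hunk), compile-time-constant sizes are left to the existing object-size checks and only runtime-sized copies take the new verification path:

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
				bool to_user);

/* Skip the runtime check for compile-time-constant sizes; those are
 * already covered by existing compile-time object-size checking. */
static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */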

File tree: 28 files changed, +555 -22 lines changed

arch/Kconfig

Lines changed: 9 additions & 0 deletions
@@ -461,6 +461,15 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+	bool
+	help
+	  An architecture should select this if it can walk the kernel stack
+	  frames to determine if an object is part of either the arguments
+	  or local variables (i.e. that it excludes saved return addresses,
+	  and similar) by implementing an inline arch_within_stack_frames(),
+	  which is used by CONFIG_HARDENED_USERCOPY.
+
 config HAVE_CONTEXT_TRACKING
 	bool
 	help
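For architectures that do not select this option, the series provides a generic fallback. A minimal sketch (assuming it mirrors the default helper added alongside this option, not the exact hunk) looks like:

/* Default when the architecture cannot walk stack frames: report
 * "unable to determine" so the usercopy check only enforces whole-stack
 * bounds. x86 overrides this with a frame-pointer walker (see
 * arch/x86/include/asm/thread_info.h below). */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}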

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -35,6 +35,7 @@ config ARM
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU

arch/arm/include/asm/uaccess.h

Lines changed: 9 additions & 2 deletions
@@ -480,7 +480,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(to, n, false);
+	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_from_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
@@ -495,11 +498,15 @@ static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
-	unsigned int __ua_flags = uaccess_save_and_enable();
+	unsigned int __ua_flags;
+
+	check_object_size(from, n, true);
+	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
 #else
+	check_object_size(from, n, true);
 	return arm_copy_to_user(to, from, n);
 #endif
 }

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -54,6 +54,7 @@ config ARM64
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)

arch/arm64/include/asm/uaccess.h

Lines changed: 10 additions & 5 deletions
@@ -265,22 +265,25 @@ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long
 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
-	return __arch_copy_from_user(to, from, n);
+	check_object_size(to, n, false);
+	return __arch_copy_from_user(to, from, n);
 }
 
 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
-	return __arch_copy_to_user(to, from, n);
+	check_object_size(from, n, true);
+	return __arch_copy_to_user(to, from, n);
 }
 
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	kasan_check_write(to, n);
 
-	if (access_ok(VERIFY_READ, from, n))
+	if (access_ok(VERIFY_READ, from, n)) {
+		check_object_size(to, n, false);
 		n = __arch_copy_from_user(to, from, n);
-	else /* security hole - plug it */
+	} else /* security hole - plug it */
 		memset(to, 0, n);
 	return n;
 }
@@ -289,8 +292,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const voi
 {
 	kasan_check_read(from, n);
 
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		check_object_size(from, n, true);
 		n = __arch_copy_to_user(to, from, n);
+	}
 	return n;
 }
 

arch/ia64/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -52,6 +52,7 @@ config IA64
 	select MODULES_USE_ELF_RELA
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to

arch/ia64/include/asm/uaccess.h

Lines changed: 15 additions & 3 deletions
@@ -241,12 +241,18 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	if (!__builtin_constant_p(count))
+		check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	if (!__builtin_constant_p(count))
+		check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -258,8 +264,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	const void *__cu_from = (from);						\
 	long __cu_len = (n);							\
 										\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))				\
-		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+	if (__access_ok(__cu_to, __cu_len, get_fs())) {				\
+		if (!__builtin_constant_p(n))					\
+			check_object_size(__cu_from, __cu_len, true);		\
+		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
+	}									\
 	__cu_len;								\
 })
 
@@ -270,8 +279,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 	long __cu_len = (n);							\
 										\
 	__chk_user_ptr(__cu_from);						\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))				\
+	if (__access_ok(__cu_from, __cu_len, get_fs())) {			\
+		if (!__builtin_constant_p(n))					\
+			check_object_size(__cu_to, __cu_len, false);		\
 		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
+	}									\
 	__cu_len;								\
 })
 

arch/powerpc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -166,6 +166,7 @@ config PPC
 	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select GENERIC_CPU_AUTOPROBE
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN

arch/powerpc/include/asm/uaccess.h

Lines changed: 19 additions & 2 deletions
@@ -310,10 +310,15 @@ static inline unsigned long copy_from_user(void *to,
 {
 	unsigned long over;
 
-	if (access_ok(VERIFY_READ, from, n))
+	if (access_ok(VERIFY_READ, from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
 		return __copy_tofrom_user((__force void __user *)to, from, n);
+	}
 	if ((unsigned long)from < TASK_SIZE) {
 		over = (unsigned long)from + n - TASK_SIZE;
+		if (!__builtin_constant_p(n - over))
+			check_object_size(to, n - over, false);
 		return __copy_tofrom_user((__force void __user *)to, from,
 				n - over) + over;
 	}
@@ -325,10 +330,15 @@ static inline unsigned long copy_to_user(void __user *to,
 {
 	unsigned long over;
 
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
 		return __copy_tofrom_user(to, (__force void __user *)from, n);
+	}
 	if ((unsigned long)to < TASK_SIZE) {
 		over = (unsigned long)to + n - TASK_SIZE;
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n - over, true);
 		return __copy_tofrom_user(to, (__force void __user *)from,
 				n - over) + over;
 	}
@@ -372,6 +382,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 		if (ret == 0)
 			return 0;
 	}
+
+	if (!__builtin_constant_p(n))
+		check_object_size(to, n, false);
+
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -398,6 +412,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
 		if (ret == 0)
 			return 0;
 	}
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
+
 	return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 

arch/s390/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -123,6 +123,7 @@ config S390
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_EARLY_PFN_TO_NID
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_JUMP_LABEL
 	select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES
 	select HAVE_ARCH_SECCOMP_FILTER

arch/s390/lib/uaccess.c

Lines changed: 2 additions & 0 deletions
@@ -104,6 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	check_object_size(to, n, false);
 	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
@@ -177,6 +178,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
 	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);

arch/sparc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -43,6 +43,7 @@ config SPARC
 	select OLD_SIGSUSPEND
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
+	select HAVE_ARCH_HARDENED_USERCOPY
 
 config SPARC32
 	def_bool !64BIT

arch/sparc/include/asm/uaccess_32.h

Lines changed: 10 additions & 4 deletions
@@ -248,22 +248,28 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) to, n))
+	if (n && __access_ok((unsigned long) to, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(from, n, true);
 		return __copy_user(to, (__force void __user *) from, n);
-	else
+	} else
 		return n;
 }
 
 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	if (!__builtin_constant_p(n))
+		check_object_size(from, n, true);
 	return __copy_user(to, (__force void __user *) from, n);
 }
 
 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (n && __access_ok((unsigned long) from, n))
+	if (n && __access_ok((unsigned long) from, n)) {
+		if (!__builtin_constant_p(n))
+			check_object_size(to, n, false);
 		return __copy_user((__force void __user *) to, from, n);
-	else
+	} else
 		return n;
 }
 

arch/sparc/include/asm/uaccess_64.h

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,8 +210,12 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
210210
static inline unsigned long __must_check
211211
copy_from_user(void *to, const void __user *from, unsigned long size)
212212
{
213-
unsigned long ret = ___copy_from_user(to, from, size);
213+
unsigned long ret;
214214

215+
if (!__builtin_constant_p(size))
216+
check_object_size(to, size, false);
217+
218+
ret = ___copy_from_user(to, from, size);
215219
if (unlikely(ret))
216220
ret = copy_from_user_fixup(to, from, size);
217221

@@ -227,8 +231,11 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
227231
static inline unsigned long __must_check
228232
copy_to_user(void __user *to, const void *from, unsigned long size)
229233
{
230-
unsigned long ret = ___copy_to_user(to, from, size);
234+
unsigned long ret;
231235

236+
if (!__builtin_constant_p(size))
237+
check_object_size(from, size, true);
238+
ret = ___copy_to_user(to, from, size);
232239
if (unlikely(ret))
233240
ret = copy_to_user_fixup(to, from, size);
234241
return ret;

arch/x86/Kconfig

Lines changed: 2 additions & 0 deletions
@@ -80,6 +80,7 @@ config X86
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_AOUT if X86_32
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
 	select HAVE_ARCH_SOFT_DIRTY if X86_64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	select HAVE_ARCH_WITHIN_STACK_FRAMES
 	select HAVE_EBPF_JIT if X86_64
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE

arch/x86/include/asm/thread_info.h

Lines changed: 44 additions & 0 deletions
@@ -176,6 +176,50 @@ static inline unsigned long current_stack_pointer(void)
 	return sp;
 }
 
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ *		 1 if within a frame
+ *		-1 if placed across a frame boundary (or outside stack)
+ *		 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+					   const void * const stackend,
+					   const void *obj, unsigned long len)
+{
+#if defined(CONFIG_FRAME_POINTER)
+	const void *frame = NULL;
+	const void *oldframe;
+
+	oldframe = __builtin_frame_address(1);
+	if (oldframe)
+		frame = __builtin_frame_address(2);
+	/*
+	 * low ----------------------------------------------> high
+	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+	 *                     ^----------------^
+	 *               allow copies only within here
+	 */
+	while (stack <= frame && frame < stackend) {
+		/*
+		 * If obj + len extends past the last frame, this
+		 * check won't pass and the next frame will be 0,
		 * causing us to bail out and correctly report
+		 * the copy as invalid.
+		 */
+		if (obj + len <= frame)
+			return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+		oldframe = frame;
+		frame = *(const void * const *)frame;
+	}
+	return -1;
+#else
+	return 0;
+#endif
+}
+
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64