
Commit ed8f8ce

Author: Russell King (committed)
Merge branches 'debug', 'fixes', 'l2c' (early part), 'misc' and 'sa1100' into for-next
5 parents: a61cbf5 + 8e64806 + 1d88967 + 3cf3857 + e461894

File tree: 51 files changed, +956 additions, -1257 deletions

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+What:		/sys/bus/amba/devices/.../driver_override
+Date:		September 2014
+Contact:	Antonios Motakis <[email protected]>
+Description:
+		This file allows the driver for a device to be specified which
+		will override standard OF, ACPI, ID table, and name matching.
+		When specified, only a driver with a name matching the value
+		written to driver_override will have an opportunity to bind to
+		the device. The override is specified by writing a string to the
+		driver_override file (echo vfio-amba > driver_override) and may
+		be cleared with an empty string (echo > driver_override).
+		This returns the device to standard matching rules binding.
+		Writing to driver_override does not automatically unbind the
+		device from its current driver or make any attempt to
+		automatically load the specified driver. If no driver with a
+		matching name is currently loaded in the kernel, the device will
+		not bind to any driver. This also allows devices to opt-out of
+		driver binding using a driver_override name such as "none".
+		Only a single driver may be specified in the override, there is
+		no support for parsing delimiters.
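
A short C sketch may help illustrate the matching rule this attribute describes. It is modeled on how the platform bus honours driver_override rather than being a quote of the amba bus code in this commit; the struct and helper names are made up for the example.

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    /* Hypothetical device type carrying a driver_override string, much as
     * amba and platform devices do; all names here are illustrative. */
    struct example_device {
            struct device dev;
            const char *driver_override;
    };

    #define to_example_device(d) container_of(d, struct example_device, dev)

    static int example_bus_match(struct device *dev, struct device_driver *drv)
    {
            struct example_device *edev = to_example_device(dev);

            /* An override short-circuits OF/ACPI/ID-table/name matching. */
            if (edev->driver_override)
                    return !strcmp(edev->driver_override, drv->name);

            /* Fall back to plain name matching for the sake of the sketch. */
            return !strcmp(dev_name(dev), drv->name);
    }

In use, "echo vfio-amba > driver_override" followed by rebinding the device makes only a driver named vfio-amba eligible to bind it.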

Documentation/devicetree/bindings/arm/l2cc.txt

Lines changed: 10 additions & 0 deletions
@@ -57,6 +57,16 @@ Optional properties:
 - cache-id-part: cache id part number to be used if it is not present
   on hardware
 - wt-override: If present then L2 is forced to Write through mode
+- arm,double-linefill : Override double linefill enable setting. Enable if
+  non-zero, disable if zero.
+- arm,double-linefill-incr : Override double linefill on INCR read. Enable
+  if non-zero, disable if zero.
+- arm,double-linefill-wrap : Override double linefill on WRAP read. Enable
+  if non-zero, disable if zero.
+- arm,prefetch-drop : Override prefetch drop enable setting. Enable if non-zero,
+  disable if zero.
+- arm,prefetch-offset : Override prefetch offset value. Valid values are
+  0-7, 15, 23, and 31.
 
 Example:
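
As a rough illustration of how a driver might consume these optional properties, the sketch below reads two of them with of_property_read_u32(); it is not the actual L2C code in this series, and the function and variable names are invented.

    #include <linux/of.h>
    #include <linux/types.h>

    /* Illustrative only: a property that is absent leaves the existing
     * (hardware/firmware) setting untouched; a present property overrides
     * it with the cell value, matching the binding text above. */
    static void example_l2c_parse_tuning(const struct device_node *np,
                                         bool *double_linefill, u32 *prefetch_offset)
    {
            u32 val;

            if (!of_property_read_u32(np, "arm,double-linefill", &val))
                    *double_linefill = val != 0;    /* enable if non-zero */

            if (!of_property_read_u32(np, "arm,prefetch-offset", &val))
                    *prefetch_offset = val;         /* valid: 0-7, 15, 23, 31 */
    }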

arch/arm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -29,6 +29,7 @@ config ARM
 	select HANDLE_DOMAIN_IRQ
 	select HARDIRQS_SW_RESEND
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
+	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)

arch/arm/boot/compressed/head.S

Lines changed: 31 additions & 10 deletions
@@ -178,7 +178,7 @@ not_angel:
 
 		/*
 		 * Set up a page table only if it won't overwrite ourself.
-		 * That means r4 < pc && r4 - 16k page directory > &_end.
+		 * That means r4 < pc || r4 - 16k page directory > &_end.
 		 * Given that r4 > &_end is most unfrequent, we add a rough
 		 * additional 1MB of room for a possible appended DTB.
 		 */
@@ -263,16 +263,37 @@ restart:	adr	r0, LC0
 		 * OK... Let's do some funky business here.
 		 * If we do have a DTB appended to zImage, and we do have
 		 * an ATAG list around, we want the later to be translated
-		 * and folded into the former here. To be on the safe side,
-		 * let's temporarily move the stack away into the malloc
-		 * area. No GOT fixup has occurred yet, but none of the
-		 * code we're about to call uses any global variable.
+		 * and folded into the former here. No GOT fixup has occurred
+		 * yet, but none of the code we're about to call uses any
+		 * global variable.
 		 */
-		add	sp, sp, #0x10000
+
+		/* Get the initial DTB size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+		/* 50% DTB growth should be good enough */
+		add	r5, r5, r5, lsr #1
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+		/* clamp to 32KB min and 1MB max */
+		cmp	r5, #(1 << 15)
+		movlo	r5, #(1 << 15)
+		cmp	r5, #(1 << 20)
+		movhi	r5, #(1 << 20)
+		/* temporarily relocate the stack past the DTB work space */
+		add	sp, sp, r5
+
 		stmfd	sp!, {r0-r3, ip, lr}
 		mov	r0, r8
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bl	atags_to_fdt
 
 		/*
@@ -285,11 +306,11 @@ restart:	adr	r0, LC0
 		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
-		sub	r2, sp, r6
+		mov	r2, r5
 		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
-		sub	sp, sp, #0x10000
+		sub	sp, sp, r5
 #endif
 
 		mov	r8, r6			@ use the appended device tree
@@ -306,7 +327,7 @@ restart:	adr	r0, LC0
 		subs	r1, r5, r1
 		addhi	r9, r9, r1
 
-		/* Get the dtb's size */
+		/* Get the current DTB size */
 		ldr	r5, [r6, #4]
#ifndef __ARMEB__
 		/* convert r5 (dtb size) to little endian */
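
The new sizing logic above is compact assembly; the same computation expressed as a C sketch (illustrative only, with be32_to_cpu and the helper name standing in for the open-coded sequence) is:

    #include <linux/kernel.h>       /* ALIGN, clamp */
    #include <asm/byteorder.h>      /* be32_to_cpu */

    /* Illustrative C equivalent of the stack-relocation sizing above:
     * take the appended DTB's totalsize (big-endian word at offset 4),
     * allow 50% growth for the folded-in ATAGs, keep 64-bit alignment,
     * and clamp the work space to between 32KB and 1MB. */
    static unsigned long example_dtb_workspace(const void *fdt)
    {
            const __be32 *totalsize = (const void *)((const u8 *)fdt + 4);
            u32 size = be32_to_cpu(*totalsize);

            size += size >> 1;              /* 50% growth */
            size = ALIGN(size, 8);          /* preserve 64-bit alignment */
            return clamp(size, 1U << 15, 1U << 20);
    }

The stack is then moved up by exactly this amount, and the same value is passed to atags_to_fdt() as the available buffer size instead of the old fixed 64KB.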

arch/arm/boot/dts/exynos4210.dtsi

Lines changed: 9 additions & 0 deletions
@@ -81,6 +81,15 @@
 		reg = <0x10023CA0 0x20>;
 	};
 
+	l2c: l2-cache-controller@10502000 {
+		compatible = "arm,pl310-cache";
+		reg = <0x10502000 0x1000>;
+		cache-unified;
+		cache-level = <2>;
+		arm,tag-latency = <2 2 1>;
+		arm,data-latency = <2 2 1>;
+	};
+
 	gic: interrupt-controller@10490000 {
 		cpu-offset = <0x8000>;
 	};

arch/arm/boot/dts/exynos4x12.dtsi

Lines changed: 14 additions & 0 deletions
@@ -54,6 +54,20 @@
 		reg = <0x10023CA0 0x20>;
 	};
 
+	l2c: l2-cache-controller@10502000 {
+		compatible = "arm,pl310-cache";
+		reg = <0x10502000 0x1000>;
+		cache-unified;
+		cache-level = <2>;
+		arm,tag-latency = <2 2 1>;
+		arm,data-latency = <3 2 1>;
+		arm,double-linefill = <1>;
+		arm,double-linefill-incr = <0>;
+		arm,double-linefill-wrap = <1>;
+		arm,prefetch-drop = <1>;
+		arm,prefetch-offset = <7>;
+	};
+
 	clock: clock-controller@10030000 {
 		compatible = "samsung,exynos4412-clock";
 		reg = <0x10030000 0x20000>;

arch/arm/include/asm/bitrev.h

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+	__asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
+	return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+	return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+	return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
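
For reference, the semantics of these rbit-backed helpers can be checked against a portable loop; the userspace test below is illustrative and not part of the patch.

    #include <assert.h>
    #include <stdint.h>

    /* Portable reference: reverse the bits of a 32-bit word, mirroring what
     * the single ARMv7 "rbit" instruction computes in __arch_bitrev32(). */
    static uint32_t bitrev32_ref(uint32_t x)
    {
            uint32_t r = 0;
            int i;

            for (i = 0; i < 32; i++)
                    r |= ((x >> i) & 1u) << (31 - i);
            return r;
    }

    int main(void)
    {
            assert(bitrev32_ref(0x00000001u) == 0x80000000u);
            assert(bitrev32_ref(0x12345678u) == 0x1e6a2c48u);
            /* The 16- and 8-bit variants shift the 32-bit result down,
             * exactly as in the header above: 0xb1 reversed is 0x8d. */
            assert((uint8_t)(bitrev32_ref(0xb1u) >> 24) == 0x8du);
            return 0;
    }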

arch/arm/include/asm/compiler.h

Lines changed: 14 additions & 1 deletion
@@ -8,8 +8,21 @@
  * This string is meant to be concatenated with the inline asm string and
  * will cause compilation to stop on mismatch.
  * (for details, see gcc PR 15089)
+ * For compatibility with clang, we have to specifically take the equivalence
+ * of 'r11' <-> 'fp' and 'r12' <-> 'ip' into account as well.
  */
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#define __asmeq(x, y) \
+	".ifnc " x "," y "; " \
+	".ifnc " x y ",fpr11; " \
+	".ifnc " x y ",r11fp; " \
+	".ifnc " x y ",ipr12; " \
+	".ifnc " x y ",r12ip; " \
+	".err; " \
+	".endif; " \
+	".endif; " \
+	".endif; " \
+	".endif; " \
+	".endif\n\t"
 
 
 #endif /* __ASM_ARM_COMPILER_H */
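
A typical use of __asmeq ties a register variable to the register named in the asm body, so a mismatch fails at assembly time; the snippet below shows the general pattern (the wrapper name is invented, not code from this commit).

    #include <asm/compiler.h>

    /* Illustrative: __asmeq("%0", "r0") emits .ifnc checks so that if the
     * compiler ever allocates something other than r0/r1 for these
     * operands, assembly stops with .err instead of silently passing the
     * wrong registers. With the clang-aware definition above, "fp" for
     * "r11" and "ip" for "r12" are accepted as equivalent spellings. */
    static inline int example_hypercall(int nr, unsigned long arg)
    {
            register int r0 asm("r0") = nr;
            register unsigned long r1 asm("r1") = arg;

            asm volatile(
                    __asmeq("%0", "r0")
                    __asmeq("%1", "r1")
                    "svc    #0"
                    : "+r" (r0)
                    : "r" (r1)
                    : "memory");

            return r0;
    }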

arch/arm/include/asm/outercache.h

Lines changed: 3 additions & 0 deletions
@@ -23,6 +23,8 @@
 
 #include <linux/types.h>
 
+struct l2x0_regs;
+
 struct outer_cache_fns {
 	void (*inv_range)(unsigned long, unsigned long);
 	void (*clean_range)(unsigned long, unsigned long);
@@ -36,6 +38,7 @@ struct outer_cache_fns {
 
 	/* This is an ARM L2C thing */
 	void (*write_sec)(unsigned long, unsigned);
+	void (*configure)(const struct l2x0_regs *);
 };
 
 extern struct outer_cache_fns outer_cache;
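
How a platform might populate the new hook (together with write_sec) can be sketched as follows. The firmware helpers are hypothetical placeholders, since secure-monitor interfaces are platform specific; struct l2x0_regs comes from asm/hardware/cache-l2x0.h.

    #include <linux/init.h>
    #include <asm/outercache.h>
    #include <asm/hardware/cache-l2x0.h>    /* struct l2x0_regs */

    /* Hypothetical firmware interface: both helpers below stand in for
     * whatever secure-monitor call the platform actually provides. */
    extern void example_smc_l2c_write(unsigned long reg, unsigned long val);
    extern void example_smc_l2c_configure(const struct l2x0_regs *regs);

    static void example_l2c_write_sec(unsigned long val, unsigned reg)
    {
            example_smc_l2c_write(reg, val);
    }

    /* The new hook hands the driver's full register snapshot to the
     * platform, so a controller whose registers are secure-only can be
     * (re)programmed in one go, e.g. when restoring state on resume. */
    static void example_l2c_configure(const struct l2x0_regs *regs)
    {
            example_smc_l2c_configure(regs);
    }

    static void __init example_init_l2c_hooks(void)
    {
            /* Install before l2x0_of_init(); note the init_IRQ() change
             * below, which now leaves an already-installed write_sec alone. */
            outer_cache.write_sec = example_l2c_write_sec;
            outer_cache.configure = example_l2c_configure;
    }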

arch/arm/include/uapi/asm/unistd.h

Lines changed: 1 addition & 0 deletions
@@ -413,6 +413,7 @@
 #define __NR_getrandom			(__NR_SYSCALL_BASE+384)
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
 #define __NR_bpf			(__NR_SYSCALL_BASE+386)
+#define __NR_execveat			(__NR_SYSCALL_BASE+387)
 
 /*
  * The following SWIs are ARM private.

arch/arm/kernel/calls.S

Lines changed: 1 addition & 0 deletions
@@ -396,6 +396,7 @@
 		CALL(sys_getrandom)
 /* 385 */	CALL(sys_memfd_create)
 		CALL(sys_bpf)
+		CALL(sys_execveat)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
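
The newly wired syscall can be exercised from userspace even before a libc wrapper exists by going through syscall(2); the program below is a minimal illustration, not part of the patch, and the fallback __NR_execveat value assumes the ARM EABI numbering added above.

    #define _GNU_SOURCE
    #include <fcntl.h>          /* O_PATH, AT_EMPTY_PATH */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_execveat
    #define __NR_execveat 387   /* ARM EABI: __NR_SYSCALL_BASE + 387 */
    #endif

    int main(void)
    {
            char *argv[] = { "ls", "-l", NULL };
            char *envp[] = { NULL };
            int fd = open("/bin/ls", O_PATH);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* execveat(dirfd, "", argv, envp, AT_EMPTY_PATH) executes the
             * file referred to by fd itself, much like fexecve(3). */
            syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
            perror("execveat");         /* only reached on failure */
            return 1;
    }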

arch/arm/kernel/entry-header.S

Lines changed: 7 additions & 6 deletions
@@ -253,21 +253,22 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
-	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
+	mov	r2, sp
+	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
+	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	strex	r1, r2, [r2]			@ clear the exclusive monitor
 #endif
 	.if	\fast
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	ldmdb	r2, {r1 - lr}^			@ get calling r1 - lr
 	.else
-	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
+	ldmdb	r2, {r0 - lr}^			@ get calling r0 - lr
 	.endif
 	mov	r0, r0				@ ARMv5T and earlier require a nop
 						@ after ldm {}^
-	add	sp, sp, #S_FRAME_SIZE - S_PC
+	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

arch/arm/kernel/entry-v7m.S

Lines changed: 2 additions & 0 deletions
@@ -22,10 +22,12 @@
 
 __invalid_entry:
 	v7m_exception_entry
+#ifdef CONFIG_PRINTK
 	adr	r0, strerr
 	mrs	r1, ipsr
 	mov	r2, lr
 	bl	printk
+#endif
 	mov	r0, sp
 	bl	show_regs
 1:	b	1b

arch/arm/kernel/head.S

Lines changed: 8 additions & 1 deletion
@@ -346,6 +346,12 @@ __turn_mmu_on_loc:
 
 #if defined(CONFIG_SMP)
 	.text
+ENTRY(secondary_startup_arm)
+	.arm
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
 ENTRY(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
@@ -385,6 +391,7 @@ ENTRY(secondary_startup)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	ret	r12				)
 ENDPROC(secondary_startup)
+ENDPROC(secondary_startup_arm)
 
 /*
  * r6 = &secondary_data
@@ -586,7 +593,7 @@ __fixup_pv_table:
 	add	r5, r5, r3	@ adjust table end address
 	add	r6, r6, r3	@ adjust __pv_phys_pfn_offset address
 	add	r7, r7, r3	@ adjust __pv_offset address
-	mov	r0, r8, lsr #12		@ convert to PFN
+	mov	r0, r8, lsr #PAGE_SHIFT	@ convert to PFN
 	str	r0, [r6]	@ save computed PHYS_OFFSET to __pv_phys_pfn_offset
 	strcc	ip, [r7, #HIGH_OFFSET]	@ save to __pv_offset high bits
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions

arch/arm/kernel/irq.c

Lines changed: 2 additions & 1 deletion
@@ -109,7 +109,8 @@ void __init init_IRQ(void)
 
 	if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
 	    (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
-		outer_cache.write_sec = machine_desc->l2c_write_sec;
+		if (!outer_cache.write_sec)
+			outer_cache.write_sec = machine_desc->l2c_write_sec;
 		ret = l2x0_of_init(machine_desc->l2c_aux_val,
 				   machine_desc->l2c_aux_mask);
 		if (ret)

arch/arm/kernel/perf_event.c

Lines changed: 8 additions & 2 deletions
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
 
 	local64_set(&hwc->prev_count, (u64)-left);
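
A quick worked example of why half of max_period is used as the cap (illustrative numbers, assuming a 32-bit counter, not code from the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* A 32-bit PMU counter has max_period = 0xffffffff. Programming
             * prev_count = -left makes it overflow after "left" events.
             * Capping left at max_period/2 leaves the top half of the range
             * as headroom, so events counted between the overflow and the
             * interrupt handler reprogramming the counter cannot wrap it
             * past the value about to be written. */
            uint64_t max_period = 0xffffffffull;
            uint64_t left = 0x300000000ull;     /* requested period, too large */

            if (left > (max_period >> 1))
                    left = max_period >> 1;
            assert(left == 0x7fffffffull);
            return 0;
    }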

arch/arm/kernel/setup.c

Lines changed: 5 additions & 2 deletions
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
 
 	/*
 	 * Ensure that start/size are aligned to a page boundary.
-	 * Size is appropriately rounded down, start is rounded up.
+	 * Size is rounded down, start is rounded up.
 	 */
-	size -= start & ~PAGE_MASK;
 	aligned_start = PAGE_ALIGN(start);
+	if (aligned_start > start + size)
+		size = 0;
+	else
+		size -= aligned_start - start;
 
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 	if (aligned_start > ULONG_MAX) {
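
The effect of the new rounding can be checked with concrete numbers; the small userspace sketch below uses hypothetical addresses and a local PAGE_ALIGN stand-in.

    #include <assert.h>
    #include <stdint.h>

    #define EXAMPLE_PAGE_SIZE 4096ull
    #define EXAMPLE_PAGE_ALIGN(x) \
            (((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))

    int main(void)
    {
            /* Normal case: start 0x60000400 rounds up to 0x60001000 and the
             * size shrinks by the 0xc00 bytes that were skipped. */
            uint64_t start = 0x60000400ull, size = 0x00100000ull;
            uint64_t aligned_start = EXAMPLE_PAGE_ALIGN(start);

            assert(aligned_start == 0x60001000ull);
            size -= aligned_start - start;
            assert(size == 0x000ff400ull);

            /* Degenerate case the patch guards against: a region smaller
             * than the distance to the next page boundary now yields
             * size = 0 instead of underflowing. */
            start = 0x60000400ull; size = 0x200ull;
            aligned_start = EXAMPLE_PAGE_ALIGN(start);
            if (aligned_start > start + size)
                    size = 0;
            assert(size == 0);
            return 0;
    }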

arch/arm/kernel/suspend.c

Lines changed: 0 additions & 4 deletions
@@ -14,10 +14,6 @@ extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
 
 #ifdef CONFIG_MMU
-/*
- * Hide the first two arguments to __cpu_suspend - these are an implementation
- * detail which platform code shouldn't have to know about.
- */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
 	struct mm_struct *mm = current->active_mm;

arch/arm/lib/Makefile

Lines changed: 2 additions & 13 deletions
@@ -15,19 +15,8 @@ lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o \
 		   call_with_stack.o bswapsdi2.o
 
-mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
-
-# the code in uaccess.S is not preemption safe and
-# probably faster on ARMv3 only
-ifeq ($(CONFIG_PREEMPT),y)
-  mmu-y	+= copy_from_user.o copy_to_user.o
-else
-ifneq ($(CONFIG_CPU_32v3),y)
-  mmu-y	+= copy_from_user.o copy_to_user.o
-else
-  mmu-y	+= uaccess.o
-endif
-endif
+mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o \
+	   copy_from_user.o copy_to_user.o
 
 # using lib_ here won't override already available weak symbols
 obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
