Commit a5e90b1

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
 "Further ARM fixes:

   - Anson Huang noticed that we were corrupting a register we
     shouldn't be during suspend on some CPUs.

   - Shengjiu Wang spotted a bug in the 'swp' instruction emulation.

   - Will Deacon fixed a bug in the ASID allocator.

   - Laura Abbott fixed the kernel permission protection to apply to
     all threads running in the system.

   - I've fixed two bugs with the domain access control register
     handling, one to do with printing an appropriate value at oops
     time, and the other to further fix the uaccess_with_memcpy code"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8475/1: SWP emulation: Restore original *data when failed
  ARM: 8471/1: need to save/restore arm register(r11) when it is corrupted
  ARM: fix uaccess_with_memcpy() with SW_DOMAIN_PAN
  ARM: report proper DACR value in oops dumps
  ARM: 8464/1: Update all mm structures with section adjustments
  ARM: 8465/1: mm: keep reserved ASIDs in sync with mm after multiple rollovers
2 parents edb42dc + 34bfbae commit a5e90b1

7 files changed: 138 additions & 68 deletions

arch/arm/include/asm/uaccess.h

Lines changed: 4 additions & 0 deletions
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
+#else
+	return arm_copy_to_user(to, from, n);
+#endif
 }
 
 extern unsigned long __must_check

arch/arm/kernel/process.c

Lines changed: 18 additions & 15 deletions
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
 	char buf[64];
+#ifndef CONFIG_CPU_V7M
+	unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Get the domain register for the parent context. In user
+	 * mode, we don't save the DACR, so lets use what it should
+	 * be. For other modes, we place it after the pt_regs struct.
+	 */
+	if (user_mode(regs))
+		domain = DACR_UACCESS_ENABLE;
+	else
+		domain = *(unsigned int *)(regs + 1);
+#else
+	domain = get_domain();
+#endif
+#endif
 
 	show_regs_print_info(KERN_DEFAULT);
 
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
 
 #ifndef CONFIG_CPU_V7M
 	{
-		unsigned int domain = get_domain();
 		const char *segment;
 
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		/*
-		 * Get the domain register for the parent context. In user
-		 * mode, we don't save the DACR, so lets use what it should
-		 * be. For other modes, we place it after the pt_regs struct.
-		 */
-		if (user_mode(regs))
-			domain = DACR_UACCESS_ENABLE;
-		else
-			domain = *(unsigned int *)(regs + 1);
-#endif
-
 		if ((domain & domain_mask(DOMAIN_USER)) ==
 		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 			segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
 	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 	{
-		unsigned int transbase, dac = get_domain();
+		unsigned int transbase;
 		asm("mrc p15, 0, %0, c2, c0\n\t"
 		    : "=r" (transbase));
 		snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
-			transbase, dac);
+			transbase, domain);
 	}
 #endif
 	asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

arch/arm/kernel/swp_emulate.c

Lines changed: 3 additions & 3 deletions
@@ -36,10 +36,10 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)	\
 	__asm__ __volatile__(				\
-	" mov		%2, %1\n"			\
-	"0:	ldrex"B"	%1, [%3]\n"		\
-	"1:	strex"B"	%0, %2, [%3]\n"		\
+	"0:	ldrex"B"	%2, [%3]\n"		\
+	"1:	strex"B"	%0, %1, [%3]\n"		\
 	"	cmp	%0, #0\n"			\
+	"	moveq	%1, %2\n"			\
 	"	movne	%0, %4\n"			\
 	"2:\n"						\
 	"	.section	 .text.fixup,\"ax\"\n"	\

arch/arm/lib/uaccess_with_memcpy.c

Lines changed: 23 additions & 6 deletions
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+	unsigned long ua_flags;
 	int atomic;
 
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memcpy((void *)to, from, tocopy);
+		uaccess_restore(ua_flags);
 		to += tocopy;
 		from += tocopy;
 		n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 * With frame pointer disabled, tail call optimization kicks in
 	 * as well making this test almost invisible.
 	 */
-	if (n < 64)
-		return __copy_to_user_std(to, from, n);
-	return __copy_to_user_memcpy(to, from, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __copy_to_user_std(to, from, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __copy_to_user_memcpy(to, from, n);
+	}
+	return n;
 }
 
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+	unsigned long ua_flags;
+
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memset((void *)addr, 0, n);
 		return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memset((void *)addr, 0, tocopy);
+		uaccess_restore(ua_flags);
 		addr += tocopy;
 		n -= tocopy;
 
@@ -193,9 +205,14 @@ __clear_user_memset(void __user *addr, unsigned long n)
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
-	if (n < 64)
-		return __clear_user_std(addr, n);
-	return __clear_user_memset(addr, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __clear_user_std(addr, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __clear_user_memset(addr, n);
+	}
+	return n;
 }
 
 #if 0
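
Two details tie these hunks to the uaccess.h change above. The 64-byte cut-off keeps short copies on the standard assembler routine, presumably because the page-pinning path only pays off for larger copies; and since __copy_to_user() no longer opens the user-access window when CONFIG_UACCESS_WITH_MEMCPY is set, each implementation now brackets its own accesses: the short-copy path around the whole call, the memcpy path around each pinned chunk only, never across the page-table walk. A rough stand-alone sketch of that dispatch, with placeholder helpers standing in for uaccess_save_and_enable()/uaccess_restore() and the real copy routines:

#include <string.h>

/* Placeholders for uaccess_save_and_enable()/uaccess_restore(). */
static unsigned int open_uaccess(void) { return 0; }
static void close_uaccess(unsigned int flags) { (void)flags; }

/* Stand-in for the page-pinning copy: it opens the window around the
 * copy itself, as the hunks above do around each memcpy(). */
static unsigned long copy_paged(void *to, const void *from, unsigned long n)
{
        unsigned int flags = open_uaccess();
        memcpy(to, from, n);
        close_uaccess(flags);
        return 0;
}

static unsigned long copy_dispatch(void *to, const void *from, unsigned long n)
{
        if (n < 64) {
                /* short copy: standard routine, bracketed by the caller */
                unsigned int flags = open_uaccess();
                memcpy(to, from, n);
                close_uaccess(flags);
                return 0;
        }
        return copy_paged(to, from, n); /* long copy: callee owns the bracket */
}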

arch/arm/mm/context.c

Lines changed: 26 additions & 12 deletions
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,20 +196,22 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
 		 * it if possible.,
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
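
The comment in the new helper carries the reasoning: after several rollovers, more than one CPU can hold a reserved copy of the same stale ASID, and returning on the first match used to leave the remaining copies unchanged, so a later generation could miss that the ASID was still reserved. The fix keeps iterating and rewrites every matching copy with the new-generation value. A simplified stand-alone model of the corrected helper, using a plain array in place of per_cpu() and intended purely as illustration:

#include <stdbool.h>
#include <stdint.h>

/* reserved[cpu] holds the ASID that was live on that CPU at rollover
 * time.  Every copy of the old value is bumped to the new generation;
 * the loop deliberately runs to completion instead of returning on the
 * first hit. */
static bool check_update_reserved(uint64_t *reserved, int ncpus,
                                  uint64_t asid, uint64_t newasid)
{
        bool hit = false;
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++) {
                if (reserved[cpu] == asid) {
                        hit = true;
                        reserved[cpu] = newasid;        /* update every copy */
                }
        }
        return hit;
}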

arch/arm/mm/init.c

Lines changed: 62 additions & 30 deletions
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
 	pmd_t *pmd;
 
-	mm = current->active_mm;
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 
 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
 	return !!(get_cr() & CR_XP);
 }
 
-#define set_section_perms(perms, field)	{			\
-	size_t i;						\
-	unsigned long addr;					\
-								\
-	if (!arch_has_strict_perms())				\
-		return;						\
-								\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {		\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {	\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,	\
-				SECTION_SIZE);			\
-			continue;				\
-		}						\
-								\
-		for (addr = perms[i].start;			\
-		     addr < perms[i].end;			\
-		     addr += SECTION_SIZE)			\
-			section_update(addr, perms[i].mask,	\
-				perms[i].field);		\
-	}							\
+void set_section_perms(struct section_perm *perms, int n, bool set,
+			struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				set ? perms[i].prot : perms[i].clear, mm);
+	}
+
 }
 
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+				current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */

arch/arm/mm/proc-v7.S

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
9595
.equ cpu_v7_suspend_size, 4 * 9
9696
#ifdef CONFIG_ARM_CPU_SUSPEND
9797
ENTRY(cpu_v7_do_suspend)
98-
stmfd sp!, {r4 - r10, lr}
98+
stmfd sp!, {r4 - r11, lr}
9999
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
100100
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
101101
stmia r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
112112
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
113113
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
114114
stmia r0, {r5 - r11}
115-
ldmfd sp!, {r4 - r10, pc}
115+
ldmfd sp!, {r4 - r11, pc}
116116
ENDPROC(cpu_v7_do_suspend)
117117

118118
ENTRY(cpu_v7_do_resume)
