
Commit 2620bf0

Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
 "The usual collection of random fixes. Also some further fixes to the
  last set of security fixes, and some more from Will (which you may
  already have in a slightly different form)"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7807/1: kexec: validate CPU hotplug support
  ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
  ARM: 7811/1: locks: use early clobber in arch_spin_trylock
  ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()
  ARM: 7809/1: perf: fix event validation for software group leaders
  ARM: Fix FIQ code on VIVT CPUs
  ARM: Fix !kuser helpers case
  ARM: Fix the world famous typo with is_gate_vma()

2 parents 359d16c + 2a28224; commit 2620bf0

File tree: 8 files changed, +69 and -33 lines

arch/arm/include/asm/smp_plat.h (3 additions, 0 deletions)

@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif

arch/arm/include/asm/spinlock.h (31 additions, 20 deletions)

@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
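Why the new loops matter: on ARM a strex can be rejected even when nobody holds the lock, because the exclusive monitor can be cleared by an interrupt or by another access to the same reservation granule. A trylock that treats the first strex failure as "locked" can therefore spuriously fail on a free lock. A minimal C sketch of the retry pattern, with hypothetical ldrex()/strex() helpers standing in for the exclusive-access instructions:

/* Sketch only: ldrex()/strex() model LDREX/STREX; strex() returns
 * nonzero when the exclusive store is rejected by the monitor. */
static int write_trylock_sketch(unsigned long *lock)
{
	unsigned long contended, res;

	do {
		contended = ldrex(lock);		/* load-exclusive */
		res = 0;
		if (contended == 0)			/* looks free */
			res = strex(lock, 0x80000000);	/* may fail spuriously */
	} while (res);		/* retry until strex gives a definite answer */

	return contended == 0;	/* fail only if the lock was really held */
}

The 7811/1 fix is related but distinct: it is the "=r" to "=&r" change in arch_spin_trylock above. Without the early clobber, the compiler may allocate res to the same register as an input operand that the asm template still reads after res is first written, corrupting the retry condition.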

arch/arm/kernel/entry-armv.S (2 additions, 1 deletion)

@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
	.endm
 
	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
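The kuser_cmpxchg_check macro only makes sense when the kuser helpers are actually mapped, hence the added CONFIG_KUSER_HELPERS term. For context, this is roughly how pre-v6K userspace invokes the helper this macro backs up; a sketch based on Documentation/arm/kernel_user_helpers.txt (the C wrapper names are hypothetical; the 0xffff0fc0 entry point is the documented ABI):

/* __kuser_cmpxchg lives at a fixed address in the vector page and
 * returns 0 when *ptr was updated from oldval to newval. */
typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

static int atomic_increment(volatile int *counter)
{
	int old;

	do {
		old = *counter;
	} while (__kuser_cmpxchg(old, old + 1, counter));	/* 0 == success */

	return old + 1;
}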

arch/arm/kernel/fiq.c (2 additions, 6 deletions)

@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
	void *base = vectors_page;
-#endif
	unsigned offset = FIQ_OFFSET;
 
	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range(base + offset, offset + length);
	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)

arch/arm/kernel/machine_kexec.c (16 additions, 4 deletions)

@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -38,6 +39,14 @@ int machine_kexec_prepare(struct kimage *image)
	__be32 header;
	int i, err;
 
+	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
	/*
	 * No segment at default ATAGs address. try to locate
	 * a dtb using magic.
@@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
	page_list = image->head & PAGE_MASK;
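The split between the two checks is deliberate: num_possible_cpus() is fixed for the whole boot, so the policy decision can be made when the image is loaded, while machine_kexec() only asserts an invariant that machine_shutdown() must already have established. A minimal sketch of reusing the same gate in a hypothetical caller (num_possible_cpus() and platform_can_cpu_hotplug() are real and appear in this merge; the surrounding function is invented):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <asm/smp_plat.h>

static int example_prepare_cpu_handover(void)
{
	/*
	 * Mirrors machine_kexec_prepare(): on hardware that can run
	 * more than one CPU, refuse up front unless every secondary
	 * CPU can be hot-unplugged before jumping to the new kernel.
	 */
	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
		return -EINVAL;

	return 0;
}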

arch/arm/kernel/perf_event.c (4 additions, 1 deletion)

@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
	int mapping;
 
	if (config >= PERF_COUNT_HW_MAX)
-		return -ENOENT;
+		return -EINVAL;
 
	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
@@ -258,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;
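The validate_event() change matters for mixed groups: when the group leader is a software event, a software sibling shares the leader's pmu, so the old leader_pmu test did not skip it and the software event fell through to the ARM PMU callbacks. A user-level sketch of such a group (a hypothetical caller of the real perf_event_open ABI; nothing below is kernel code):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* A group led by a software event, with a hardware sibling: */
struct perf_event_attr sw_leader = {
	.type	= PERF_TYPE_SOFTWARE,
	.size	= sizeof(struct perf_event_attr),
	.config	= PERF_COUNT_SW_CPU_CLOCK,
};
struct perf_event_attr hw_sibling = {
	.type	= PERF_TYPE_HARDWARE,
	.size	= sizeof(struct perf_event_attr),
	.config	= PERF_COUNT_HW_CPU_CYCLES,
};

/* leader_fd  = syscall(__NR_perf_event_open, &sw_leader, 0, -1, -1, 0);
 * sibling_fd = syscall(__NR_perf_event_open, &hw_sibling, 0, -1,
 *                      leader_fd, 0); */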

arch/arm/kernel/process.c (1 addition, 1 deletion)

@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
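The "world famous typo" is the classic assignment-for-comparison slip: with a single "=", the macro compiled cleanly but overwrote its argument and evaluated to a non-NULL pointer, i.e. always true. A contrived sketch of the failure mode (the _buggy/_fixed names and the local gate_vma are invented for illustration):

#include <linux/mm_types.h>

/* Stand-in for the kernel's global gate_vma. */
static struct vm_area_struct gate_vma;

#define is_gate_vma_buggy(vma)	((vma) = &gate_vma)	/* assignment! */
#define is_gate_vma_fixed(vma)	((vma) == &gate_vma)	/* comparison */

/* With the buggy form, the caller's pointer is silently redirected to
 * &gate_vma and the test succeeds for every VMA, so e.g.
 * arch_vma_name() would have reported "[vectors]" for everything. */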

arch/arm/kernel/smp.c (10 additions, 0 deletions)

@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
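platform_can_cpu_hotplug() reports whether the platform registered a cpu_kill hook, which is what machine_kexec_prepare() above now requires on SMP hardware. A sketch of how a platform would satisfy it (struct smp_operations is real; the example_* platform callbacks are hypothetical):

#include <asm/smp.h>

extern int example_boot_secondary(unsigned int cpu, struct task_struct *idle);
extern int example_cpu_kill(unsigned int cpu);
extern void example_cpu_die(unsigned int cpu);

static struct smp_operations example_smp_ops __initdata = {
	.smp_boot_secondary	= example_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= example_cpu_kill,	/* presence of this hook
							 * makes the new helper
							 * return 1 */
	.cpu_die		= example_cpu_die,
#endif
};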
