Skip to content

Commit 3b8c9f1

Browse files
committed
arm64: IPI each CPU after invalidating the I-cache for kernel mappings
When invalidating the instruction cache for a kernel mapping via flush_icache_range(), it is also necessary to flush the pipeline for other CPUs so that instructions fetched into the pipeline before the I-cache invalidation are discarded. For example, if module 'foo' is unloaded and then module 'bar' is loaded into the same area of memory, a CPU could end up executing instructions from 'foo' when branching into 'bar' if these instructions were fetched into the pipeline before 'foo' was unloaded. Whilst this is highly unlikely to occur in practice, particularly as any exception acts as a context-synchronizing operation, following the letter of the architecture requires us to execute an ISB on each CPU in order for the new instruction stream to be visible. Acked-by: Catalin Marinas <[email protected]> Signed-off-by: Will Deacon <[email protected]>
1 parent 7373fed commit 3b8c9f1

File tree

4 files changed

+33
-18
lines changed

4 files changed

+33
-18
lines changed

arch/arm64/include/asm/cacheflush.h

Lines changed: 26 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,7 @@
1919
#ifndef __ASM_CACHEFLUSH_H
2020
#define __ASM_CACHEFLUSH_H
2121

22+
#include <linux/kgdb.h>
2223
#include <linux/mm.h>
2324

2425
/*
@@ -71,7 +72,7 @@
7172
* - kaddr - page address
7273
* - size - region size
7374
*/
74-
extern void flush_icache_range(unsigned long start, unsigned long end);
75+
extern void __flush_icache_range(unsigned long start, unsigned long end);
7576
extern int invalidate_icache_range(unsigned long start, unsigned long end);
7677
extern void __flush_dcache_area(void *addr, size_t len);
7778
extern void __inval_dcache_area(void *addr, size_t len);
@@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
8182
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
8283
extern void sync_icache_aliases(void *kaddr, unsigned long len);
8384

85+
static inline void flush_icache_range(unsigned long start, unsigned long end)
86+
{
87+
__flush_icache_range(start, end);
88+
89+
/*
90+
* IPI all online CPUs so that they undergo a context synchronization
91+
* event and are forced to refetch the new instructions.
92+
*/
93+
#ifdef CONFIG_KGDB
94+
/*
95+
* KGDB performs cache maintenance with interrupts disabled, so we
96+
* will deadlock trying to IPI the secondary CPUs. In theory, we can
97+
* set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
98+
* just means that KGDB will elide the maintenance altogether! As it
99+
* turns out, KGDB uses IPIs to round-up the secondary CPUs during
100+
* the patching operation, so we don't need extra IPIs here anyway.
101+
* In which case, add a KGDB-specific bodge and return early.
102+
*/
103+
if (kgdb_connected && irqs_disabled())
104+
return;
105+
#endif
106+
kick_all_cpus_sync();
107+
}
108+
84109
static inline void flush_cache_mm(struct mm_struct *mm)
85110
{
86111
}

arch/arm64/kernel/cpu_errata.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -107,7 +107,7 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
107107
for (i = 0; i < SZ_2K; i += 0x80)
108108
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
109109

110-
flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
110+
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
111111
}
112112

113113
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,

arch/arm64/kernel/insn.c

Lines changed: 4 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -216,8 +216,8 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
216216

217217
ret = aarch64_insn_write(tp, insn);
218218
if (ret == 0)
219-
flush_icache_range((uintptr_t)tp,
220-
(uintptr_t)tp + AARCH64_INSN_SIZE);
219+
__flush_icache_range((uintptr_t)tp,
220+
(uintptr_t)tp + AARCH64_INSN_SIZE);
221221

222222
return ret;
223223
}
@@ -283,18 +283,8 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
283283
if (ret)
284284
return ret;
285285

286-
if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
287-
/*
288-
* ARMv8 architecture doesn't guarantee all CPUs see
289-
* the new instruction after returning from function
290-
* aarch64_insn_patch_text_nosync(). So send IPIs to
291-
* all other CPUs to achieve instruction
292-
* synchronization.
293-
*/
294-
ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
295-
kick_all_cpus_sync();
296-
return ret;
297-
}
286+
if (aarch64_insn_hotpatch_safe(insn, insns[0]))
287+
return aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
298288
}
299289

300290
return aarch64_insn_patch_text_sync(addrs, insns, cnt);

arch/arm64/mm/cache.S

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -35,7 +35,7 @@
3535
* - start - virtual start address of region
3636
* - end - virtual end address of region
3737
*/
38-
ENTRY(flush_icache_range)
38+
ENTRY(__flush_icache_range)
3939
/* FALLTHROUGH */
4040

4141
/*
@@ -77,7 +77,7 @@ alternative_else_nop_endif
7777
9:
7878
mov x0, #-EFAULT
7979
b 1b
80-
ENDPROC(flush_icache_range)
80+
ENDPROC(__flush_icache_range)
8181
ENDPROC(__flush_cache_user_range)
8282

8383
/*

0 commit comments

Comments (0)