Skip to content

Commit efb52a7

Browse files
committed
Merge tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman: "A bit of a big batch, partly because I didn't send any last week, and also just because the BPF fixes happened to land this week. Summary: - Fix a regression hit by the IPR SCSI driver, introduced by the recent addition of MSI domains on pseries. - A big series including 8 BPF fixes, some with potential security impact and the rest various code generation issues. - Fix our program check assembler entry path, which was accidentally jumping into a gas macro and generating strange stack frames, which could confuse find_bug(). - A couple of fixes, and related changes, to fix corner cases in our machine check handling. - Fix our DMA IOMMU ops, which were not always returning the optimal DMA mask, leading to at least one device falling back to 32-bit DMA when it shouldn't. - A fix for KUAP handling on 32-bit Book3S. - Fix crashes seen when kdumping on some pseries systems. Thanks to Naveen N. Rao, Nicholas Piggin, Alexey Kardashevskiy, Cédric Le Goater, Christophe Leroy, Mahesh Salgaonkar, Abdul Haleem, Christoph Hellwig, Johan Almbladh, Stan Johnson" * tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init powerpc/32s: Fix kuap_kernel_restore() powerpc/pseries/msi: Add an empty irq_write_msi_msg() handler powerpc/64s: Fix unrecoverable MCE calling async handler from NMI powerpc/64/interrupt: Reconcile soft-mask state in NMI and fix false BUG powerpc/64: warn if local irqs are enabled in NMI or hardirq context powerpc/traps: do not enable irqs in _exception powerpc/64s: fix program check interrupt emergency stack path powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000 powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit BPF_END powerpc/bpf ppc32: Fix JMP32_JSET_K powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC powerpc/security: Add a helper to query 
stf_barrier type powerpc/bpf: Fix BPF_SUB when imm == 0x80000000 powerpc/bpf: Fix BPF_MOD when imm == 1 powerpc/bpf: Validate branch ranges powerpc/lib: Add helper to check if offset is within conditional branch range powerpc/iommu: Report the correct most efficient DMA mask for PCI devices
2 parents 75cd9b0 + eb8257a commit efb52a7

File tree

17 files changed

+234
-75
lines changed

17 files changed

+234
-75
lines changed

arch/powerpc/include/asm/book3s/32/kup.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
136136
if (kuap_is_disabled())
137137
return;
138138

139+
if (unlikely(kuap != KUAP_NONE)) {
140+
current->thread.kuap = KUAP_NONE;
141+
kuap_lock(kuap, false);
142+
}
143+
144+
if (likely(regs->kuap == KUAP_NONE))
145+
return;
146+
139147
current->thread.kuap = regs->kuap;
140148

141149
kuap_unlock(regs->kuap, false);

arch/powerpc/include/asm/code-patching.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#define BRANCH_ABSOLUTE 0x2
2424

2525
bool is_offset_in_branch_range(long offset);
26+
bool is_offset_in_cond_branch_range(long offset);
2627
int create_branch(struct ppc_inst *instr, const u32 *addr,
2728
unsigned long target, int flags);
2829
int create_cond_branch(struct ppc_inst *instr, const u32 *addr,

arch/powerpc/include/asm/interrupt.h

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
265265
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
266266
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
267267

268-
if (is_implicit_soft_masked(regs)) {
269-
// Adjust regs->softe soft implicit soft-mask, so
270-
// arch_irq_disabled_regs(regs) behaves as expected.
268+
if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
269+
/*
270+
* Adjust regs->softe to be soft-masked if it had not been
271+
* reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
272+
* not yet set disabled), or if it was in an implicit soft
273+
* masked state. This makes arch_irq_disabled_regs(regs)
274+
* behave as expected.
275+
*/
271276
regs->softe = IRQS_ALL_DISABLED;
272277
}
273-
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
274-
BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
275278

276279
/* Don't do any per-CPU operations until interrupt state is fixed */
277280

@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
525528
/* kernel/traps.c */
526529
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
527530
#ifdef CONFIG_PPC_BOOK3S_64
528-
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
529-
#else
530-
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
531+
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
531532
#endif
533+
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
532534
DECLARE_INTERRUPT_HANDLER(SMIException);
533535
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
534536
DECLARE_INTERRUPT_HANDLER(unknown_exception);

arch/powerpc/include/asm/security_features.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
3939
return !!(powerpc_security_features & feature);
4040
}
4141

42+
#ifdef CONFIG_PPC_BOOK3S_64
43+
enum stf_barrier_type stf_barrier_type_get(void);
44+
#else
45+
static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
46+
#endif
4247

4348
// Features indicating support for Spectre/Meltdown mitigations
4449

arch/powerpc/kernel/dma-iommu.c

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
184184
struct iommu_table *tbl = get_iommu_table_base(dev);
185185
u64 mask;
186186

187+
if (dev_is_pci(dev)) {
188+
u64 bypass_mask = dma_direct_get_required_mask(dev);
189+
190+
if (dma_iommu_dma_supported(dev, bypass_mask)) {
191+
dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
192+
return bypass_mask;
193+
}
194+
}
195+
187196
if (!tbl)
188197
return 0;
189198

arch/powerpc/kernel/exceptions-64s.S

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
12431243
li r10,MSR_RI
12441244
mtmsrd r10,1
12451245
addi r3,r1,STACK_FRAME_OVERHEAD
1246-
bl machine_check_exception
1246+
bl machine_check_exception_async
12471247
b interrupt_return_srr
12481248

12491249

@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
13031303
subi r12,r12,1
13041304
sth r12,PACA_IN_MCE(r13)
13051305

1306-
/* Invoke machine_check_exception to print MCE event and panic. */
1306+
/*
1307+
* Invoke machine_check_exception to print MCE event and panic.
1308+
* This is the NMI version of the handler because we are called from
1309+
* the early handler which is a true NMI.
1310+
*/
13071311
addi r3,r1,STACK_FRAME_OVERHEAD
13081312
bl machine_check_exception
13091313

@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
16651669
*/
16661670

16671671
andi. r10,r12,MSR_PR
1668-
bne 2f /* If userspace, go normal path */
1672+
bne .Lnormal_stack /* If userspace, go normal path */
16691673

16701674
andis. r10,r12,(SRR1_PROGTM)@h
1671-
bne 1f /* If TM, emergency */
1675+
bne .Lemergency_stack /* If TM, emergency */
16721676

16731677
cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
1674-
blt 2f /* normal path if not */
1678+
blt .Lnormal_stack /* normal path if not */
16751679

16761680
/* Use the emergency stack */
1677-
1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
1681+
.Lemergency_stack:
1682+
andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
16781683
/* 3 in EXCEPTION_PROLOG_COMMON */
16791684
mr r10,r1 /* Save r1 */
16801685
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
16811686
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
16821687
__ISTACK(program_check)=0
16831688
__GEN_COMMON_BODY program_check
1684-
b 3f
1685-
2:
1689+
b .Ldo_program_check
1690+
1691+
.Lnormal_stack:
16861692
__ISTACK(program_check)=1
16871693
__GEN_COMMON_BODY program_check
1688-
3:
1694+
1695+
.Ldo_program_check:
16891696
addi r3,r1,STACK_FRAME_OVERHEAD
16901697
bl program_check_exception
16911698
REST_NVGPRS(r1) /* instruction emulation may change GPRs */

arch/powerpc/kernel/irq.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
229229
return;
230230
}
231231

232+
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
233+
WARN_ON_ONCE(in_nmi() || in_hardirq());
234+
232235
/*
233236
* After the stb, interrupts are unmasked and there are no interrupts
234237
* pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
321324
if (mask)
322325
return;
323326

327+
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
328+
WARN_ON_ONCE(in_nmi() || in_hardirq());
329+
324330
/*
325331
* From this point onward, we can take interrupts, preempt,
326332
* etc... unless we got hard-disabled. We check if an event

arch/powerpc/kernel/security.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
263263

264264
early_param("no_stf_barrier", handle_no_stf_barrier);
265265

266+
enum stf_barrier_type stf_barrier_type_get(void)
267+
{
268+
return stf_enabled_flush_types;
269+
}
270+
266271
/* This is the generic flag used by other architectures */
267272
static int __init handle_ssbd(char *p)
268273
{

arch/powerpc/kernel/traps.c

Lines changed: 27 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
340340
return false;
341341
}
342342

343-
show_signal_msg(signr, regs, code, addr);
343+
/*
344+
* Must not enable interrupts even for user-mode exception, because
345+
* this can be called from machine check, which may be a NMI or IRQ
346+
* which don't like interrupts being enabled. Could check for
347+
* in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
348+
* reason why _exception() should enable irqs for an exception handler,
349+
* the handlers themselves do that directly.
350+
*/
344351

345-
if (arch_irqs_disabled())
346-
interrupt_cond_local_irq_enable(regs);
352+
show_signal_msg(signr, regs, code, addr);
347353

348354
current->thread.trap_nr = code;
349355

@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
790796
* do_exit() checks for in_interrupt() and panics in that case, so
791797
* exit the irq/nmi before calling die.
792798
*/
793-
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
794-
irq_exit();
795-
else
799+
if (in_nmi())
796800
nmi_exit();
801+
else
802+
irq_exit();
797803
die(str, regs, err);
798804
}
799805

800806
/*
801-
* BOOK3S_64 does not call this handler as a non-maskable interrupt
807+
* BOOK3S_64 does not usually call this handler as a non-maskable interrupt
802808
* (it uses its own early real-mode handler to handle the MCE proper
803809
* and then raises irq_work to call this handler when interrupts are
804-
* enabled).
810+
* enabled). The only time when this is not true is if the early handler
811+
* is unrecoverable, then it does call this directly to try to get a
812+
* message out.
805813
*/
806-
#ifdef CONFIG_PPC_BOOK3S_64
807-
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
808-
#else
809-
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
810-
#endif
814+
static void __machine_check_exception(struct pt_regs *regs)
811815
{
812816
int recover = 0;
813817

@@ -841,12 +845,19 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
841845
/* Must die if the interrupt is not recoverable */
842846
if (regs_is_unrecoverable(regs))
843847
die_mce("Unrecoverable Machine check", regs, SIGBUS);
848+
}
844849

845850
#ifdef CONFIG_PPC_BOOK3S_64
846-
return;
847-
#else
848-
return 0;
851+
DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
852+
{
853+
__machine_check_exception(regs);
854+
}
849855
#endif
856+
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
857+
{
858+
__machine_check_exception(regs);
859+
860+
return 0;
850861
}
851862

852863
DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */

arch/powerpc/lib/code-patching.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
228228
return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
229229
}
230230

231+
bool is_offset_in_cond_branch_range(long offset)
232+
{
233+
return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
234+
}
235+
231236
/*
232237
* Helper to check if a given instruction is a conditional branch
233238
* Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
280285
offset = offset - (unsigned long)addr;
281286

282287
/* Check we can represent the target in the instruction format */
283-
if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
288+
if (!is_offset_in_cond_branch_range(offset))
284289
return 1;
285290

286291
/* Mask out the flags and target, so they don't step on each other. */

arch/powerpc/net/bpf_jit.h

Lines changed: 21 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -24,16 +24,30 @@
2424
#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
2525

2626
/* Long jump; (unconditional 'branch') */
27-
#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
28-
(((dest) - (ctx->idx * 4)) & 0x03fffffc))
27+
#define PPC_JMP(dest) \
28+
do { \
29+
long offset = (long)(dest) - (ctx->idx * 4); \
30+
if (!is_offset_in_branch_range(offset)) { \
31+
pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
32+
return -ERANGE; \
33+
} \
34+
EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
35+
} while (0)
36+
2937
/* blr; (unconditional 'branch' with link) to absolute address */
3038
#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
3139
(((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
3240
/* "cond" here covers BO:BI fields. */
33-
#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
34-
(((cond) & 0x3ff) << 16) | \
35-
(((dest) - (ctx->idx * 4)) & \
36-
0xfffc))
41+
#define PPC_BCC_SHORT(cond, dest) \
42+
do { \
43+
long offset = (long)(dest) - (ctx->idx * 4); \
44+
if (!is_offset_in_cond_branch_range(offset)) { \
45+
pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
46+
return -ERANGE; \
47+
} \
48+
EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
49+
} while (0)
50+
3751
/* Sign-extended 32-bit immediate load */
3852
#define PPC_LI32(d, i) do { \
3953
if ((int)(uintptr_t)(i) >= -32768 && \
@@ -78,11 +92,6 @@
7892
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
7993
#endif
8094

81-
static inline bool is_nearbranch(int offset)
82-
{
83-
return (offset < 32768) && (offset >= -32768);
84-
}
85-
8695
/*
8796
* The fly in the ointment of code size changing from pass to pass is
8897
* avoided by padding the short branch case with a NOP. If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
91100
* state.
92101
*/
93102
#define PPC_BCC(cond, dest) do { \
94-
if (is_nearbranch((dest) - (ctx->idx * 4))) { \
103+
if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
95104
PPC_BCC_SHORT(cond, dest); \
96105
EMIT(PPC_RAW_NOP()); \
97106
} else { \

arch/powerpc/net/bpf_jit64.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,18 +16,18 @@
1616
* with our redzone usage.
1717
*
1818
* [ prev sp ] <-------------
19-
* [ nv gpr save area ] 6*8 |
19+
* [ nv gpr save area ] 5*8 |
2020
* [ tail_call_cnt ] 8 |
21-
* [ local_tmp_var ] 8 |
21+
* [ local_tmp_var ] 16 |
2222
* fp (r31) --> [ ebpf stack space ] upto 512 |
2323
* [ frame header ] 32/112 |
2424
* sp (r1) ---> [ stack pointer ] --------------
2525
*/
2626

2727
/* for gpr non volatile registers BPG_REG_6 to 10 */
28-
#define BPF_PPC_STACK_SAVE (6*8)
28+
#define BPF_PPC_STACK_SAVE (5*8)
2929
/* for bpf JIT code internal usage */
30-
#define BPF_PPC_STACK_LOCALS 16
30+
#define BPF_PPC_STACK_LOCALS 24
3131
/* stack frame excluding BPF stack, ensure this is quadword aligned */
3232
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
3333
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

arch/powerpc/net/bpf_jit_comp.c

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
210210
/* Now build the prologue, body code & epilogue for real. */
211211
cgctx.idx = 0;
212212
bpf_jit_build_prologue(code_base, &cgctx);
213-
bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
213+
if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
214+
bpf_jit_binary_free(bpf_hdr);
215+
fp = org_fp;
216+
goto out_addrs;
217+
}
214218
bpf_jit_build_epilogue(code_base, &cgctx);
215219

216220
if (bpf_jit_enable > 1)

0 commit comments

Comments
 (0)