
Commit 20edcec
Merge tag 'powerpc-6.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Fix Speculation_Store_Bypass reporting in /proc/self/status on Power10

 - Fix HPT with 4K pages since recent changes by implementing pmd_same()

 - Fix 64-bit native_hpte_remove() to be irq-safe

Thanks to Aneesh Kumar K.V, Nageswara R Sastry, and Russell Currey.

* tag 'powerpc-6.5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/mm/book3s64/hash/4k: Add pmd_same callback for 4K page size
  powerpc/64e: Fix obtool warnings in exceptions-64e.S
  powerpc/security: Fix Speculation_Store_Bypass reporting on Power10
  powerpc/64s: Fix native_hpte_remove() to be irq-safe
2 parents 6eede06 + cf53564

File tree

6 files changed: +51 -46 lines changed

arch/powerpc/include/asm/book3s/64/hash-4k.h

Lines changed: 0 additions & 6 deletions
@@ -136,12 +136,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 	return 0;
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUG();
-	return 0;
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	BUG();

arch/powerpc/include/asm/book3s/64/hash-64k.h

Lines changed: 0 additions & 5 deletions
@@ -263,11 +263,6 @@ static inline int hash__pmd_trans_huge(pmd_t pmd)
 		  (_PAGE_PTE | H_PAGE_THP_HUGE));
 }
 
-static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
-}
-
 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));

arch/powerpc/include/asm/book3s/64/hash.h

Lines changed: 5 additions & 0 deletions
@@ -132,6 +132,11 @@ static inline int get_region_id(unsigned long ea)
 	return region_id;
 }
 
+static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
+}
+
 #define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
 #define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
 static inline int hash__p4d_bad(p4d_t p4d)
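The implementation itself is unchanged from the 64K variant: XOR the raw PMD values and ignore any bits covered by _PAGE_HPTEFLAGS, the HPTE bookkeeping bits that may legitimately change under an established mapping. Hoisting it into the common hash.h gives 4K-page configurations a real comparison where they previously hit the BUG() stub. Below is a minimal userspace sketch of the same masking idiom; the mask value is made up for illustration, and the kernel's big-endian handling via pmd_raw()/cpu_to_be64() is omitted.

/*
 * Userspace sketch of the hash__pmd_same() idiom: two PMDs are "the
 * same" if they differ only in HPTE bookkeeping bits. FAKE_HPTEFLAGS
 * is a hypothetical mask, not the kernel's actual _PAGE_HPTEFLAGS.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_HPTEFLAGS 0x0c00ULL	/* hypothetical busy/hashpte bits */

static int pmd_same_sketch(uint64_t a, uint64_t b)
{
	/* XOR exposes differing bits; mask off bookkeeping bits first */
	return ((a ^ b) & ~FAKE_HPTEFLAGS) == 0;
}

int main(void)
{
	uint64_t pmd = 0x8000000000340086ULL;

	printf("%d\n", pmd_same_sketch(pmd, pmd | FAKE_HPTEFLAGS));	/* 1 */
	printf("%d\n", pmd_same_sketch(pmd, pmd + 0x1000));		/* 0 */
	return 0;
}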

arch/powerpc/kernel/exceptions-64e.S

Lines changed: 18 additions & 13 deletions
@@ -5,6 +5,7 @@
  * Copyright (C) 2007 Ben. Herrenschmidt ([email protected]), IBM Corp.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
 #include <asm/page.h>
@@ -66,7 +67,7 @@
 #define SPECIAL_EXC_LOAD(reg, name) \
 	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
 
-special_reg_save:
+SYM_CODE_START_LOCAL(special_reg_save)
 	/*
 	 * We only need (or have stack space) to save this stuff if
 	 * we interrupted the kernel.
@@ -131,8 +132,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	SPECIAL_EXC_STORE(r10,CSRR1)
 
 	blr
+SYM_CODE_END(special_reg_save)
 
-ret_from_level_except:
+SYM_CODE_START_LOCAL(ret_from_level_except)
 	ld	r3,_MSR(r1)
 	andi.	r3,r3,MSR_PR
 	beq	1f
@@ -206,6 +208,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mtxer	r11
 
 	blr
+SYM_CODE_END(ret_from_level_except)
 
 .macro ret_from_level srr0 srr1 paca_ex scratch
 	bl	ret_from_level_except
@@ -232,13 +235,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfspr	r13,\scratch
 .endm
 
-ret_from_crit_except:
+SYM_CODE_START_LOCAL(ret_from_crit_except)
 	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 	rfci
+SYM_CODE_END(ret_from_crit_except)
 
-ret_from_mc_except:
+SYM_CODE_START_LOCAL(ret_from_mc_except)
 	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 	rfmci
+SYM_CODE_END(ret_from_mc_except)
 
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\
@@ -978,20 +983,22 @@ masked_interrupt_book3e_0x2c0:
  * r14 and r15 containing the fault address and error code, with the
  * original values stashed away in the PACA
  */
-storage_fault_common:
+SYM_CODE_START_LOCAL(storage_fault_common)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	do_page_fault
 	b	interrupt_return
+SYM_CODE_END(storage_fault_common)
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
  * continues here.
  */
-alignment_more:
+SYM_CODE_START_LOCAL(alignment_more)
 	addi	r3,r1,STACK_INT_FRAME_REGS
 	bl	alignment_exception
 	REST_NVGPRS(r1)
 	b	interrupt_return
+SYM_CODE_END(alignment_more)
 
 /*
  * Trampolines used when spotting a bad kernel stack pointer in
@@ -1030,8 +1037,7 @@ BAD_STACK_TRAMPOLINE(0xe00)
 BAD_STACK_TRAMPOLINE(0xf00)
 BAD_STACK_TRAMPOLINE(0xf20)
 
-	.globl	bad_stack_book3e
-bad_stack_book3e:
+_GLOBAL(bad_stack_book3e)
 	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
 	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
 	ld	r1,PACAEMERGSP(r13)
@@ -1285,8 +1291,7 @@ have_hes:
 * ever takes any parameters, the SCOM code must also be updated to
 * provide them.
 */
-	.globl a2_tlbinit_code_start
-a2_tlbinit_code_start:
+_GLOBAL(a2_tlbinit_code_start)
 
 	ori	r11,r3,MAS0_WQ_ALLWAYS
 	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
@@ -1479,16 +1484,15 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-	.globl init_core_book3e
-init_core_book3e:
+_GLOBAL(init_core_book3e)
 	/* Establish the interrupt vector base */
 	tovirt(r2,r2)
 	LOAD_REG_ADDR(r3, interrupt_base_book3e)
 	mtspr	SPRN_IVPR,r3
 	sync
 	blr
 
-init_thread_book3e:
+SYM_CODE_START_LOCAL(init_thread_book3e)
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
 
@@ -1502,6 +1506,7 @@ init_thread_book3e:
 	mtspr	SPRN_TSR,r3
 
 	blr
+SYM_CODE_END(init_thread_book3e)
 
 _GLOBAL(__setup_base_ivors)
 	SET_IVOR(0, 0x020)	/* Critical Input */
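These bare local labels and .globl/label pairs are exactly what objtool complains about: instructions that fall outside any annotated symbol, which objtool cannot attribute and kallsyms cannot size. The SYM_CODE_* annotations from the newly included <linux/linkage.h> mark each body as non-C-ABI code with a defined start and end, and powerpc's _GLOBAL() (from asm/ppc_asm.h) does the same for global entry points, which is why each .globl/label pair collapses into a single line. Paraphrased from linux/linkage.h (check your tree for the exact definitions), the two local-symbol macros used here expand roughly as:

/*
 * Paraphrase of include/linux/linkage.h: SYM_CODE_* marks code that
 * does not follow the C ABI, giving it a proper symbol start, end,
 * and type for objtool and kallsyms.
 */
#define SYM_CODE_START_LOCAL(name)	\
	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)	/* local (non-exported) symbol */

#define SYM_CODE_END(name)		\
	SYM_END(name, SYM_T_NONE)			/* close symbol; type STT_NOTYPE */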

arch/powerpc/kernel/security.c

Lines changed: 19 additions & 18 deletions
@@ -364,26 +364,27 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
 
 static int ssb_prctl_get(struct task_struct *task)
 {
+	/*
+	 * The STF_BARRIER feature is on by default, so if it's off that means
+	 * firmware has explicitly said the CPU is not vulnerable via either
+	 * the hypercall or device tree.
+	 */
+	if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
+		return PR_SPEC_NOT_AFFECTED;
+
+	/*
+	 * If the system's CPU has no known barrier (see setup_stf_barrier())
+	 * then assume that the CPU is not vulnerable.
+	 */
 	if (stf_enabled_flush_types == STF_BARRIER_NONE)
-		/*
-		 * We don't have an explicit signal from firmware that we're
-		 * vulnerable or not, we only have certain CPU revisions that
-		 * are known to be vulnerable.
-		 *
-		 * We assume that if we're on another CPU, where the barrier is
-		 * NONE, then we are not vulnerable.
-		 */
 		return PR_SPEC_NOT_AFFECTED;
-	else
-		/*
-		 * If we do have a barrier type then we are vulnerable. The
-		 * barrier is not a global or per-process mitigation, so the
-		 * only value we can report here is PR_SPEC_ENABLE, which
-		 * appears as "vulnerable" in /proc.
-		 */
-		return PR_SPEC_ENABLE;
-
-	return -EINVAL;
+
+	/*
+	 * Otherwise the CPU is vulnerable. The barrier is not a global or
+	 * per-process mitigation, so the only value that can be reported here
+	 * is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
+	 */
+	return PR_SPEC_ENABLE;
 }
 
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
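With the checks reordered, an explicit "not vulnerable" signal from firmware (STF_BARRIER disabled) now wins before the barrier-type fallback, which is what corrects the misreporting on Power10. The return value surfaces both through prctl() and the Speculation_Store_Bypass line in /proc/self/status. A small userspace check, as a sketch assuming only the standard speculation-control constants from <linux/prctl.h>:

/*
 * Query this task's Speculation_Store_Bypass state; the kernel
 * answers via ssb_prctl_get(), the function patched above.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ret < 0) {
		perror("prctl");
		return 1;
	}
	if (ret == PR_SPEC_NOT_AFFECTED)
		puts("not affected");
	else if (ret & PR_SPEC_ENABLE)
		puts("vulnerable");	/* shown as "vulnerable" in /proc/self/status */
	else
		printf("other state: 0x%lx\n", (unsigned long)ret);
	return 0;
}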

arch/powerpc/mm/book3s64/hash_native.c

Lines changed: 9 additions & 4 deletions
@@ -328,10 +328,12 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
+	unsigned long hpte_v, flags;
 	struct hash_pte *hptep;
 	int i;
 	int slot_offset;
-	unsigned long hpte_v;
+
+	local_irq_save(flags);
 
 	DBG_LOW("    remove(group=%lx)\n", hpte_group);
 
@@ -356,13 +358,16 @@ static long native_hpte_remove(unsigned long hpte_group)
 		slot_offset &= 0x7;
 	}
 
-	if (i == HPTES_PER_GROUP)
-		return -1;
+	if (i == HPTES_PER_GROUP) {
+		i = -1;
+		goto out;
+	}
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
 	release_hpte_lock();
 	hptep->v = 0;
-
+out:
+	local_irq_restore(flags);
 	return i;
 }
 
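The pattern here is the usual one for making a lock irq-safe: disable local interrupts before taking a lock that an interrupt handler could contend on, and funnel every exit through a single label so local_irq_restore() is never skipped. Stripped of the HPTE specifics, the shape is sketched below; the two helpers are hypothetical stand-ins for the slot-search loop and release_hpte_lock(), not kernel APIs.

/*
 * Kernel-context sketch (not a standalone program) of the irq-safe
 * remove pattern used above. try_lock_some_entry() and
 * invalidate_and_unlock_entry() are hypothetical stand-ins.
 */
#include <linux/irqflags.h>

static long remove_one_entry(void)
{
	unsigned long flags;
	long ret = -1;

	local_irq_save(flags);	/* an interrupt can no longer spin on our lock */

	if (!try_lock_some_entry())	/* stand-in: search group, trylock a slot */
		goto out;		/* nothing found: still must restore IRQs */

	invalidate_and_unlock_entry();	/* stand-in: release_hpte_lock(); v = 0 */
	ret = 0;
out:
	local_irq_restore(flags);	/* single exit point restores IRQ state */
	return ret;
}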
