Skip to content

Commit aa5b105

Browse files
committed
Merge tag 'powerpc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman: - An implementation for the newly added hv_ops->flush() for the OPAL hvc console driver backends, I forgot to apply this after merging the hvc driver changes before the merge window. - Enable all PCI bridges at boot on powernv, to avoid races when multiple children of a bridge try to enable it simultaneously. This is a workaround until the PCI core can be enhanced to fix the races. - A fix to query PowerVM for the correct system topology at boot before initialising sched domains, seen in some configurations to cause broken scheduling etc. - A fix for pte_access_permitted() on "nohash" platforms. - Two commits to fix SIGBUS when using remap_pfn_range() seen on Power9 due to a workaround when using the nest MMU (GPUs, accelerators). - Another fix to the VFIO code used by KVM, the previous fix had some bugs which caused guests to not start in some configurations. - A handful of other minor fixes. Thanks to: Aneesh Kumar K.V, Benjamin Herrenschmidt, Christophe Leroy, Hari Bathini, Luke Dashjr, Mahesh Salgaonkar, Nicholas Piggin, Paul Mackerras, Srikar Dronamraju. * tag 'powerpc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: powerpc/mce: Fix SLB rebolting during MCE recovery path. KVM: PPC: Book3S: Fix guest DMA when guest partially backed by THP pages powerpc/mm/radix: Only need the Nest MMU workaround for R -> RW transition powerpc/mm/books3s: Add new pte bit to mark pte temporarily invalid. powerpc/nohash: fix pte_access_permitted() powerpc/topology: Get topology for shared processors at boot powerpc64/ftrace: Include ftrace.h needed for enable/disable calls powerpc/powernv/pci: Work around races in PCI bridge enabling powerpc/fadump: cleanup crash memory ranges support powerpc/powernv: provide a console flush operation for opal hvc driver powerpc/traps: Avoid rate limit messages from show unhandled signals powerpc/64s: Fix PACA_IRQ_HARD_DIS accounting in idle_power4()
2 parents e1dbc5a + 0f52b3a commit aa5b105

File tree

16 files changed

+170
-75
lines changed

16 files changed

+170
-75
lines changed

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,16 @@
4444

4545
#define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */
4646
#define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
47+
/*
48+
* We need to mark a pmd pte invalid while splitting. We can do that by clearing
49+
* the _PAGE_PRESENT bit. But then that will be taken as a swap pte. In order to
50+
* differentiate between the two, use a SW field when invalidating.
51+
*
52+
* We do that temporary invalidate for regular pte entry in ptep_set_access_flags
53+
*
54+
* This is used only when _PAGE_PRESENT is cleared.
55+
*/
56+
#define _PAGE_INVALID _RPAGE_SW0
4757

4858
/*
4959
* Top and bottom bits of RPN which can be used by hash
@@ -568,7 +578,13 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
568578

569579
static inline int pte_present(pte_t pte)
570580
{
571-
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
581+
/*
582+
* A pte is considered present if _PAGE_PRESENT is set.
583+
* We also need to consider the pte present which is marked
584+
* invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
585+
* if we find _PAGE_PRESENT cleared.
586+
*/
587+
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
572588
}
573589

574590
#ifdef CONFIG_PPC_MEM_KEYS

arch/powerpc/include/asm/nohash/pgtable.h

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -51,17 +51,14 @@ static inline int pte_present(pte_t pte)
5151
#define pte_access_permitted pte_access_permitted
5252
static inline bool pte_access_permitted(pte_t pte, bool write)
5353
{
54-
unsigned long pteval = pte_val(pte);
5554
/*
5655
* A read-only access is controlled by _PAGE_USER bit.
5756
* We have _PAGE_READ set for WRITE and EXECUTE
5857
*/
59-
unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
60-
61-
if (write)
62-
need_pte_bits |= _PAGE_WRITE;
58+
if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
59+
return false;
6360

64-
if ((pteval & need_pte_bits) != need_pte_bits)
61+
if (write && !pte_write(pte))
6562
return false;
6663

6764
return true;

arch/powerpc/include/asm/opal.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -308,6 +308,7 @@ extern void opal_configure_cores(void);
308308
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
309309
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
310310
extern int opal_put_chars_atomic(uint32_t vtermno, const char *buf, int total_len);
311+
extern int opal_flush_chars(uint32_t vtermno, bool wait);
311312
extern int opal_flush_console(uint32_t vtermno);
312313

313314
extern void hvc_opal_init_early(void);

arch/powerpc/include/asm/topology.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ extern int stop_topology_update(void);
9292
extern int prrn_is_enabled(void);
9393
extern int find_and_online_cpu_nid(int cpu);
9494
extern int timed_topology_update(int nsecs);
95+
extern void __init shared_proc_topology_init(void);
9596
#else
9697
static inline int start_topology_update(void)
9798
{
@@ -113,6 +114,10 @@ static inline int timed_topology_update(int nsecs)
113114
{
114115
return 0;
115116
}
117+
118+
#ifdef CONFIG_SMP
119+
static inline void shared_proc_topology_init(void) {}
120+
#endif
116121
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
117122

118123
#include <asm-generic/topology.h>

arch/powerpc/kernel/fadump.c

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
#include <linux/crash_dump.h>
3535
#include <linux/kobject.h>
3636
#include <linux/sysfs.h>
37+
#include <linux/slab.h>
3738

3839
#include <asm/debugfs.h>
3940
#include <asm/page.h>
@@ -1019,13 +1020,6 @@ static int fadump_setup_crash_memory_ranges(void)
10191020
pr_debug("Setup crash memory ranges.\n");
10201021
crash_mem_ranges = 0;
10211022

1022-
/* allocate memory for crash memory ranges for the first time */
1023-
if (!max_crash_mem_ranges) {
1024-
ret = allocate_crash_memory_ranges();
1025-
if (ret)
1026-
return ret;
1027-
}
1028-
10291023
/*
10301024
* add the first memory chunk (RMA_START through boot_memory_size) as
10311025
* a separate memory chunk. The reason is, at the time crash firmware

arch/powerpc/kernel/idle_power4.S

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
3232
cmpwi 0,r4,0
3333
beqlr
3434

35+
/* This sequence is similar to prep_irq_for_idle() */
36+
3537
/* Hard disable interrupts */
3638
mfmsr r7
3739
rldicl r0,r7,48,1
@@ -41,10 +43,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
4143
/* Check if something happened while soft-disabled */
4244
lbz r0,PACAIRQHAPPENED(r13)
4345
cmpwi cr0,r0,0
44-
bnelr
46+
bne- 2f
4547

46-
/* Soft-enable interrupts */
48+
/*
49+
* Soft-enable interrupts. This will make power4_fixup_nap return
50+
* to our caller with interrupts enabled (soft and hard). The caller
51+
* can cope with either interrupts disabled or enabled upon return.
52+
*/
4753
#ifdef CONFIG_TRACE_IRQFLAGS
54+
/* Tell the tracer interrupts are on, because idle responds to them. */
4855
mflr r0
4956
std r0,16(r1)
5057
stdu r1,-128(r1)
@@ -73,3 +80,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
7380
isync
7481
b 1b
7582

83+
2: /* Return if an interrupt had happened while soft disabled */
84+
/* Set the HARD_DIS flag because interrupts are now hard disabled */
85+
ori r0,r0,PACA_IRQ_HARD_DIS
86+
stb r0,PACAIRQHAPPENED(r13)
87+
blr

arch/powerpc/kernel/smp.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1160,6 +1160,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
11601160
if (smp_ops && smp_ops->bringup_done)
11611161
smp_ops->bringup_done();
11621162

1163+
/*
1164+
* On a shared LPAR, associativity needs to be requested.
1165+
* Hence, get numa topology before dumping cpu topology
1166+
*/
1167+
shared_proc_topology_init();
11631168
dump_numa_cpu_topology();
11641169

11651170
/*

arch/powerpc/kernel/traps.c

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -315,22 +315,21 @@ void user_single_step_siginfo(struct task_struct *tsk,
315315
info->si_addr = (void __user *)regs->nip;
316316
}
317317

318-
static bool show_unhandled_signals_ratelimited(void)
318+
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
319+
unsigned long addr)
319320
{
320321
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
321322
DEFAULT_RATELIMIT_BURST);
322-
return show_unhandled_signals && __ratelimit(&rs);
323-
}
324323

325-
static void show_signal_msg(int signr, struct pt_regs *regs, int code,
326-
unsigned long addr)
327-
{
328-
if (!show_unhandled_signals_ratelimited())
324+
if (!show_unhandled_signals)
329325
return;
330326

331327
if (!unhandled_signal(current, signr))
332328
return;
333329

330+
if (!__ratelimit(&rs))
331+
return;
332+
334333
pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
335334
current->comm, current->pid, signame(signr), signr,
336335
addr, regs->nip, regs->link, code);

arch/powerpc/kvm/book3s_hv.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
#include <linux/compiler.h>
4747
#include <linux/of.h>
4848

49+
#include <asm/ftrace.h>
4950
#include <asm/reg.h>
5051
#include <asm/ppc-opcode.h>
5152
#include <asm/asm-prototypes.h>

arch/powerpc/mm/mmu_context_iommu.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
129129
long i, j, ret = 0, locked_entries = 0;
130130
unsigned int pageshift;
131131
unsigned long flags;
132+
unsigned long cur_ua;
132133
struct page *page = NULL;
133134

134135
mutex_lock(&mem_list_mutex);
@@ -177,7 +178,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
177178
}
178179

179180
for (i = 0; i < entries; ++i) {
180-
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
181+
cur_ua = ua + (i << PAGE_SHIFT);
182+
if (1 != get_user_pages_fast(cur_ua,
181183
1/* pages */, 1/* iswrite */, &page)) {
182184
ret = -EFAULT;
183185
for (j = 0; j < i; ++j)
@@ -196,7 +198,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
196198
if (is_migrate_cma_page(page)) {
197199
if (mm_iommu_move_page_from_cma(page))
198200
goto populate;
199-
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
201+
if (1 != get_user_pages_fast(cur_ua,
200202
1/* pages */, 1/* iswrite */,
201203
&page)) {
202204
ret = -EFAULT;
@@ -210,20 +212,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
210212
}
211213
populate:
212214
pageshift = PAGE_SHIFT;
213-
if (PageCompound(page)) {
215+
if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
214216
pte_t *pte;
215217
struct page *head = compound_head(page);
216218
unsigned int compshift = compound_order(head);
219+
unsigned int pteshift;
217220

218221
local_irq_save(flags); /* disables as well */
219-
pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
220-
local_irq_restore(flags);
222+
pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
221223

222224
/* Double check it is still the same pinned page */
223225
if (pte && pte_page(*pte) == head &&
224-
pageshift == compshift)
225-
pageshift = max_t(unsigned int, pageshift,
226+
pteshift == compshift + PAGE_SHIFT)
227+
pageshift = max_t(unsigned int, pteshift,
226228
PAGE_SHIFT);
229+
local_irq_restore(flags);
227230
}
228231
mem->pageshift = min(mem->pageshift, pageshift);
229232
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;

arch/powerpc/mm/numa.c

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,7 +1078,6 @@ static int prrn_enabled;
10781078
static void reset_topology_timer(void);
10791079
static int topology_timer_secs = 1;
10801080
static int topology_inited;
1081-
static int topology_update_needed;
10821081

10831082
/*
10841083
* Change polling interval for associativity changes.
@@ -1306,11 +1305,8 @@ int numa_update_cpu_topology(bool cpus_locked)
13061305
struct device *dev;
13071306
int weight, new_nid, i = 0;
13081307

1309-
if (!prrn_enabled && !vphn_enabled) {
1310-
if (!topology_inited)
1311-
topology_update_needed = 1;
1308+
if (!prrn_enabled && !vphn_enabled && topology_inited)
13121309
return 0;
1313-
}
13141310

13151311
weight = cpumask_weight(&cpu_associativity_changes_mask);
13161312
if (!weight)
@@ -1423,7 +1419,6 @@ int numa_update_cpu_topology(bool cpus_locked)
14231419

14241420
out:
14251421
kfree(updates);
1426-
topology_update_needed = 0;
14271422
return changed;
14281423
}
14291424

@@ -1551,6 +1546,15 @@ int prrn_is_enabled(void)
15511546
return prrn_enabled;
15521547
}
15531548

1549+
void __init shared_proc_topology_init(void)
1550+
{
1551+
if (lppaca_shared_proc(get_lppaca())) {
1552+
bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
1553+
nr_cpumask_bits);
1554+
numa_update_cpu_topology(false);
1555+
}
1556+
}
1557+
15541558
static int topology_read(struct seq_file *file, void *v)
15551559
{
15561560
if (vphn_enabled || prrn_enabled)
@@ -1608,10 +1612,6 @@ static int topology_update_init(void)
16081612
return -ENOMEM;
16091613

16101614
topology_inited = 1;
1611-
if (topology_update_needed)
1612-
bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
1613-
nr_cpumask_bits);
1614-
16151615
return 0;
16161616
}
16171617
device_initcall(topology_update_init);

arch/powerpc/mm/pgtable-radix.c

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1045,20 +1045,22 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
10451045
struct mm_struct *mm = vma->vm_mm;
10461046
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
10471047
_PAGE_RW | _PAGE_EXEC);
1048+
1049+
unsigned long change = pte_val(entry) ^ pte_val(*ptep);
10481050
/*
10491051
* To avoid NMMU hang while relaxing access, we need mark
10501052
* the pte invalid in between.
10511053
*/
1052-
if (atomic_read(&mm->context.copros) > 0) {
1054+
if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
10531055
unsigned long old_pte, new_pte;
10541056

1055-
old_pte = __radix_pte_update(ptep, ~0, 0);
1057+
old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
10561058
/*
10571059
* new value of pte
10581060
*/
10591061
new_pte = old_pte | set;
10601062
radix__flush_tlb_page_psize(mm, address, psize);
1061-
__radix_pte_update(ptep, 0, new_pte);
1063+
__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
10621064
} else {
10631065
__radix_pte_update(ptep, 0, set);
10641066
/*

arch/powerpc/mm/slb.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
7070

7171
static inline void slb_shadow_clear(enum slb_index index)
7272
{
73-
WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
73+
WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
7474
}
7575

7676
static inline void create_shadowed_slbe(unsigned long ea, int ssize,

0 commit comments

Comments
 (0)