
Commit e7e8184

npiggin authored and mpe committed
powerpc/64s: move machine check SLB flushing to mm/slb.c
The machine check code that flushes and restores bolted segments in real mode belongs in mm/slb.c. This will also be used by pseries machine check and idle code in future changes.

Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent ae24ce5 commit e7e8184
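In effect, the machine check path now reduces to roughly the following. This is a simplified sketch condensed from the mce_power.c hunk below; the CONFIG_KVM_BOOK3S_HANDLER in-guest early return is omitted for brevity.

static void flush_and_reload_slb(void)
{
	/* Invalidate all SLB entries, including entry 0 (realmode only). */
	slb_flush_all_realmode();

	/* Radix does not use the SLB, so there is nothing to reload. */
	if (early_radix_enabled())
		return;

	/* The shadow SLB may not be allocated yet in early boot. */
	if (!get_slb_shadow())
		return;

	/* Reload the bolted segments from the shadow SLB area. */
	slb_restore_bolted_realmode();
}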

File tree

3 files changed (+51 −17 lines):

arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/kernel/mce_power.c
arch/powerpc/mm/slb.c

arch/powerpc/include/asm/book3s/64/mmu-hash.h

Lines changed: 3 additions & 0 deletions
@@ -497,6 +497,9 @@ extern void hpte_init_native(void);
 
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
+void slb_flush_all_realmode(void);
+void __slb_restore_bolted_realmode(void);
+void slb_restore_bolted_realmode(void);
 
 extern void slb_vmalloc_update(void);
 extern void slb_set_size(u16 size);

arch/powerpc/kernel/mce_power.c

Lines changed: 9 additions & 17 deletions
@@ -62,11 +62,8 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 #ifdef CONFIG_PPC_BOOK3S_64
 static void flush_and_reload_slb(void)
 {
-	struct slb_shadow *slb;
-	unsigned long i, n;
-
 	/* Invalidate all SLBs */
-	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+	slb_flush_all_realmode();
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
 	/*
@@ -76,22 +73,17 @@ static void flush_and_reload_slb(void)
 	if (get_paca()->kvm_hstate.in_guest)
 		return;
 #endif
-
-	/* For host kernel, reload the SLBs from shadow SLB buffer. */
-	slb = get_slb_shadow();
-	if (!slb)
+	if (early_radix_enabled())
 		return;
 
-	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
-
-	/* Load up the SLB entries from shadow SLB */
-	for (i = 0; i < n; i++) {
-		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
-		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+	/*
+	 * This probably shouldn't happen, but it may be possible it's
+	 * called in early boot before SLB shadows are allocated.
+	 */
+	if (!get_slb_shadow())
+		return;
 
-		rb = (rb & ~0xFFFul) | i;
-		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
-	}
+	slb_restore_bolted_realmode();
 }
 #endif
 

arch/powerpc/mm/slb.c

Lines changed: 39 additions & 0 deletions
@@ -90,6 +90,45 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
+/*
+ * Insert bolted entries into SLB (which may not be empty, so don't clear
+ * slb_cache_ptr).
+ */
+void __slb_restore_bolted_realmode(void)
+{
+	struct slb_shadow *p = get_slb_shadow();
+	enum slb_index index;
+
+	/* No isync needed because realmode. */
+	for (index = 0; index < SLB_NUM_BOLTED; index++) {
+		asm volatile("slbmte %0,%1" :
+		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
+		       "r" (be64_to_cpu(p->save_area[index].esid)));
+	}
+}
+
+/*
+ * Insert the bolted entries into an empty SLB.
+ * This is not the same as rebolt because the bolted segments are not
+ * changed, just loaded from the shadow area.
+ */
+void slb_restore_bolted_realmode(void)
+{
+	__slb_restore_bolted_realmode();
+	get_paca()->slb_cache_ptr = 0;
+}
+
+/*
+ * This flushes all SLB entries including 0, so it must be realmode.
+ */
+void slb_flush_all_realmode(void)
+{
+	/*
+	 * This flushes all SLB entries including 0, so it must be realmode.
+	 */
+	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
+}
+
 static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
