
Commit 73d16a6

Ian Munsie authored and mpe committed

powerpc/cell: Move data segment faulting code out of cell platform

__spu_trap_data_seg() currently contains code to determine the VSID and ESID
required for a particular EA and mm struct. This code is generically useful
for other co-processors.

This moves the code out of the cell platform so it can be used by other
powerpc code. It also adds 1TB segment handling, which Cell didn't support.
The new function is called copro_calculate_slb().

This also moves the internal struct spu_slb to a generic struct copro_slb,
which is now used in the Cell and copro code. We use this new struct instead
of passing around esid and vsid parameters.

Signed-off-by: Ian Munsie <[email protected]>
Signed-off-by: Michael Neuling <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>

1 parent e83d016 commit 73d16a6

File tree

5 files changed: +69 −49 lines changed

arch/powerpc/include/asm/copro.h

Lines changed: 7 additions & 0 deletions

@@ -10,7 +10,14 @@
 #ifndef _ASM_POWERPC_COPRO_H
 #define _ASM_POWERPC_COPRO_H
 
+struct copro_slb
+{
+	u64 esid, vsid;
+};
+
 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 			  unsigned long dsisr, unsigned *flt);
 
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
 #endif /* _ASM_POWERPC_COPRO_H */
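
A minimal sketch of the intended call pattern for this new interface, from a
hypothetical co-processor driver (struct coproc_dev and coproc_write_slbe()
are illustrative stand-ins, not part of this commit; the real consumer is
__spu_trap_data_seg() in spu_base.c, below):

	/* Hypothetical SLB fault path for a co-processor device. */
	static int coproc_data_seg_fault(struct coproc_dev *dev, u64 ea)
	{
		struct copro_slb slb;
		int ret;

		/* Derive the ESID/VSID pair for this effective address. */
		ret = copro_calculate_slb(dev->mm, ea, &slb);
		if (ret)
			return ret;	/* EA falls in an unsupported region */

		/* Device specific: program slb.esid/slb.vsid into the unit's SLB. */
		coproc_write_slbe(dev, &slb);
		return 0;
	}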

arch/powerpc/include/asm/mmu-hash64.h

Lines changed: 7 additions & 0 deletions

@@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 
 #ifndef __ASSEMBLY__
 
+static inline int slb_vsid_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SLB_VSID_SHIFT;
+	return SLB_VSID_SHIFT_1T;
+}
+
 static inline int segment_shift(int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)
arch/powerpc/mm/copro_fault.c

Lines changed: 46 additions & 0 deletions

@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/export.h>
 #include <asm/reg.h>
+#include <asm/copro.h>
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -90,3 +91,48 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+{
+	u64 vsid;
+	int psize, ssize;
+
+	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch (REGION_ID(ea)) {
+	case USER_REGION_ID:
+		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+		psize = get_slice_psize(mm, ea);
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
+		break;
+	case VMALLOC_REGION_ID:
+		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	case KERNEL_REGION_ID:
+		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+		psize = mmu_linear_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	default:
+		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
+		return 1;
+	}
+
+	vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
+
+	vsid |= mmu_psize_defs[psize].sllp |
+		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
+
+	slb->vsid = vsid;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(copro_calculate_slb);
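
For reference, a short sketch of what the new 1TB segment handling changes
relative to the old open-coded Cell version, which hard-wired
MMU_SEGSIZE_256M (explanatory only, not part of the diff):

	/* The segment size is now derived from the EA rather than assumed,
	 * and both the VSID position and the B field follow it. */
	ssize = user_segment_size(ea);		/* MMU_SEGSIZE_256M or _1T */
	vsid = get_vsid(mm->context.id, ea, ssize);
	vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
	if (ssize == MMU_SEGSIZE_1T)
		vsid |= SLB_VSID_B_1T;		/* select the 1T encoding in the SLB entry */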

arch/powerpc/mm/slb.c

Lines changed: 0 additions & 3 deletions

@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
-#define slb_vsid_shift(ssize)	\
-	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 					 unsigned long flags)
 {

arch/powerpc/platforms/cell/spu_base.c

Lines changed: 9 additions & 46 deletions

@@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
-struct spu_slb {
-	u64 esid, vsid;
-};
-
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu)
 	}
 }
 
-static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 
@@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-	struct mm_struct *mm = spu->mm;
-	struct spu_slb slb;
-	int psize;
-
-	pr_debug("%s\n", __func__);
-
-	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+	struct copro_slb slb;
+	int ret;
 
-	switch(REGION_ID(ea)) {
-	case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
-		psize = get_slice_psize(mm, ea);
-#else
-		psize = mm->context.user_psize;
-#endif
-		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
-		break;
-	case VMALLOC_REGION_ID:
-		if (ea < VMALLOC_END)
-			psize = mmu_vmalloc_psize;
-		else
-			psize = mmu_io_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	case KERNEL_REGION_ID:
-		psize = mmu_linear_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	default:
-		/* Future: support kernel segments so that drivers
-		 * can use SPUs.
-		 */
-		pr_debug("invalid region access at %016lx\n", ea);
-		return 1;
-	}
-	slb.vsid |= mmu_psize_defs[psize].sllp;
+	ret = copro_calculate_slb(spu->mm, ea, &slb);
+	if (ret)
+		return ret;
 
 	spu_load_slb(spu, spu->slb_replace, &slb);
 
@@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	return 0;
 }
 
-static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
 {
 	unsigned long ea = (unsigned long)addr;
 	u64 llp;
@@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
  * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
  * address @new_addr is present.
  */
-static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
 				void *new_addr)
 {
 	unsigned long ea = (unsigned long)new_addr;
@@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 			   void *code, int code_size)
 {
-	struct spu_slb slbs[4];
+	struct copro_slb slbs[4];
 	int i, nr_slbs = 0;
 	/* start and end addresses of both mappings */
 	void *addrs[] = {
