@@ -58,6 +58,17 @@ int __init_new_context(void)
58
58
return index ;
59
59
}
60
60
EXPORT_SYMBOL_GPL (__init_new_context );
61
/*
 * radix__init_new_context - install a process-table entry for a new mm
 * @mm:    the new address space; its page-global directory (pgd) is
 *         recorded in the process table
 * @index: context id previously allocated for this mm; used as the
 *         process-table slot
 *
 * Builds the first doubleword (prtb0) of the process-table entry:
 * the RTS (radix tree size) field, the physical address of the PGD,
 * and the root page-directory index size, stored big-endian as the
 * hardware expects.
 *
 * Always returns 0 (no failure path here; the caller has already
 * allocated @index).
 */
static int radix__init_new_context(struct mm_struct *mm, int index)
{
	unsigned long rts_field;

	/*
	 * Set the process table entry for this context.
	 *
	 * NOTE(review): RTS value 3 in the bits selected by
	 * PPC_BITLSHIFT(2) presumably encodes the supported radix
	 * tree/address-space size per the Power ISA process-table
	 * entry layout — confirm against the ISA definition.
	 */
	rts_field = 3ull << PPC_BITLSHIFT(2);
	/* prtb0 = RTS | physical PGD base | PGD index size, big-endian. */
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
	return 0;
}
61
72
62
73
int init_new_context (struct task_struct * tsk , struct mm_struct * mm )
63
74
{
@@ -67,13 +78,18 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
67
78
if (index < 0 )
68
79
return index ;
69
80
70
- /* The old code would re-promote on fork, we don't do that
71
- * when using slices as it could cause problem promoting slices
72
- * that have been forced down to 4K
73
- */
74
- if (slice_mm_new_context (mm ))
75
- slice_set_user_psize (mm , mmu_virtual_psize );
76
- subpage_prot_init_new_context (mm );
81
+ if (radix_enabled ()) {
82
+ radix__init_new_context (mm , index );
83
+ } else {
84
+
85
+ /* The old code would re-promote on fork, we don't do that
86
+ * when using slices as it could cause problem promoting slices
87
+ * that have been forced down to 4K
88
+ */
89
+ if (slice_mm_new_context (mm ))
90
+ slice_set_user_psize (mm , mmu_virtual_psize );
91
+ subpage_prot_init_new_context (mm );
92
+ }
77
93
mm -> context .id = index ;
78
94
#ifdef CONFIG_PPC_ICSWX
79
95
mm -> context .cop_lockp = kmalloc (sizeof (spinlock_t ), GFP_KERNEL );
@@ -144,8 +160,19 @@ void destroy_context(struct mm_struct *mm)
144
160
mm -> context .cop_lockp = NULL ;
145
161
#endif /* CONFIG_PPC_ICSWX */
146
162
163
+ if (radix_enabled ())
164
+ process_tb [mm -> context .id ].prtb1 = 0 ;
165
+ else
166
+ subpage_prot_free (mm );
147
167
destroy_pagetable_page (mm );
148
168
__destroy_context (mm -> context .id );
149
- subpage_prot_free (mm );
150
169
mm -> context .id = MMU_NO_CONTEXT ;
151
170
}
171
+
172
#ifdef CONFIG_PPC_RADIX_MMU
/*
 * radix__switch_mmu_context - switch the hardware translation context
 * @prev: outgoing address space (unused here; kept for the common
 *        switch_mmu_context() signature)
 * @next: incoming address space whose context id becomes the new PID
 *
 * On radix, switching address spaces is just repointing the PID SPR
 * at @next's context id; the hardware then walks @next's process-table
 * entry for translations.
 */
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	mtspr(SPRN_PID, next->context.id);
	/*
	 * isync: context-synchronizing instruction so the PID update
	 * takes effect before any subsequent instructions/translations.
	 * The "memory" clobber keeps the compiler from reordering
	 * memory accesses across the barrier.
	 */
	asm volatile("isync" : : : "memory");
}
#endif
0 commit comments