@@ -42,36 +42,69 @@ static inline void kuep_unlock(void)
 
 #include <linux/sched.h>
 
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
-{
-	addr &= 0xf0000000;	/* align addr to start of segment */
-	barrier();	/* make sure thread.kuap is updated before playing with SRs */
-	for (;;) {
-		mtsr(sr, addr);
-		addr += 0x10000000;	/* address of next segment */
-		if (addr >= end)
-			break;
-		sr += 0x111;		/* next VSID */
-		sr &= 0xf0ffffff;	/* clear VSID overflow */
-	}
+#define KUAP_NONE	(~0UL)
+#define KUAP_ALL	(~1UL)
+
+static inline void kuap_lock_one(unsigned long addr)
+{
+	mtsr(mfsr(addr) | SR_KS, addr);
+	isync();	/* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_one(unsigned long addr)
+{
+	mtsr(mfsr(addr) & ~SR_KS, addr);
+	isync();	/* Context sync required after mtsr() */
+}
+
+static inline void kuap_lock_all(void)
+{
+	update_user_segments(mfsr(0) | SR_KS);
 	isync();	/* Context sync required after mtsr() */
 }
 
+static inline void kuap_unlock_all(void)
+{
+	update_user_segments(mfsr(0) & ~SR_KS);
+	isync();	/* Context sync required after mtsr() */
+}
+
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock(unsigned long addr, bool ool)
+{
+	if (likely(addr != KUAP_ALL))
+		kuap_lock_one(addr);
+	else if (!ool)
+		kuap_lock_all();
+	else
+		kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+	if (likely(addr != KUAP_ALL))
+		kuap_unlock_one(addr);
+	else if (!ool)
+		kuap_unlock_all();
+	else
+		kuap_unlock_all_ool();
+}
+
 static inline void kuap_save_and_lock(struct pt_regs *regs)
 {
 	unsigned long kuap = current->thread.kuap;
-	u32 addr = kuap & 0xf0000000;
-	u32 end = kuap << 28;
 
 	if (kuap_is_disabled())
 		return;
 
 	regs->kuap = kuap;
-	if (unlikely(!kuap))
+	if (unlikely(kuap == KUAP_NONE))
 		return;
 
-	current->thread.kuap = 0;
-	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* Set Ks */
+	current->thread.kuap = KUAP_NONE;
+	kuap_lock(kuap, false);
 }
 
 static inline void kuap_user_restore(struct pt_regs *regs)
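The per-segment loop removed above does not simply disappear: the new "all segments" helpers delegate to update_user_segments(), which is defined outside this hunk. The following is a rough, host-buildable sketch only; the helper's real body lives in the 32-bit hash-MMU headers and its details are an assumption here. The point it illustrates is that one segment-register value pattern gets applied to every user segment, stepping the VSID the way the removed kuap_update_sr() loop did, instead of walking a sub-range:

#include <stdio.h>

#define SR_KS		0x40000000u	/* Ks (supervisor protection key) bit of a segment register */
#define NUM_USER_SEGS	16		/* 16 x 256MB segments on 32-bit */

static unsigned int segment_regs[NUM_USER_SEGS];	/* stand-in for the real SRs */

/* stand-in for mtsr(): record the value for the segment containing 'addr' */
static void fake_mtsr(unsigned int sr, unsigned int addr)
{
	segment_regs[addr >> 28] = sr;
}

/* Assumed model of update_user_segments(val): the same VSID stepping
 * (+0x111 per segment, overflow masked) as the removed kuap_update_sr()
 * loop, applied unconditionally to every user segment. */
static void sketch_update_user_segments(unsigned int val)
{
	for (unsigned int i = 0; i < NUM_USER_SEGS; i++)
		fake_mtsr((val + i * 0x111) & 0xf0ffffffu, i << 28);
}

int main(void)
{
	/* "lock all": set Ks in every user segment register */
	sketch_update_user_segments(segment_regs[0] | SR_KS);
	printf("SR0=%08x SR1=%08x SR15=%08x\n",
	       segment_regs[0], segment_regs[1], segment_regs[15]);
	return 0;
}

The sketch only shows the data flow; in the kernel the update is open-coded per segment so it can be inlined efficiently.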
@@ -80,28 +113,22 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 
 static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
-	u32 addr = regs->kuap & 0xf0000000;
-	u32 end = regs->kuap << 28;
-
 	if (kuap_is_disabled())
 		return;
 
 	current->thread.kuap = regs->kuap;
 
-	if (unlikely(regs->kuap == kuap))
-		return;
-
-	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
+	kuap_unlock(regs->kuap, false);
 }
 
 static inline unsigned long kuap_get_and_assert_locked(void)
 {
 	unsigned long kuap = current->thread.kuap;
 
 	if (kuap_is_disabled())
-		return 0;
+		return KUAP_NONE;
 
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != 0);
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
 
 	return kuap;
 }
@@ -114,8 +141,6 @@ static inline void kuap_assert_locked(void)
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
 					      u32 size, unsigned long dir)
 {
-	u32 addr, end;
-
 	if (kuap_is_disabled())
 		return;
 
@@ -125,88 +150,71 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	if (!(dir & KUAP_WRITE))
 		return;
 
-	addr = (__force u32)to;
-
-	if (unlikely(addr >= TASK_SIZE || !size))
-		return;
-
-	end = min(addr + size, TASK_SIZE);
-
-	current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
-	kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end);	/* Clear Ks */
+	current->thread.kuap = (__force u32)to;
+	kuap_unlock_one((__force u32)to);
 }
 
 static __always_inline void prevent_user_access(void __user *to, const void __user *from,
 						u32 size, unsigned long dir)
 {
-	u32 addr, end;
+	u32 kuap = current->thread.kuap;
 
 	if (kuap_is_disabled())
 		return;
 
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 
-	if (dir & KUAP_CURRENT_WRITE) {
-		u32 kuap = current->thread.kuap;
-
-		if (unlikely(!kuap))
-			return;
-
-		addr = kuap & 0xf0000000;
-		end = kuap << 28;
-	} else if (dir & KUAP_WRITE) {
-		addr = (__force u32)to;
-		end = min(addr + size, TASK_SIZE);
-
-		if (unlikely(addr >= TASK_SIZE || !size))
-			return;
-	} else {
+	if (!(dir & KUAP_WRITE))
 		return;
-	}
 
-	current->thread.kuap = 0;
-	kuap_update_sr(mfsr(addr) | SR_KS, addr, end);	/* set Ks */
+	current->thread.kuap = KUAP_NONE;
+	kuap_lock(kuap, true);
 }
 
 static inline unsigned long prevent_user_access_return(void)
 {
 	unsigned long flags = current->thread.kuap;
-	unsigned long addr = flags & 0xf0000000;
-	unsigned long end = flags << 28;
-	void __user *to = (__force void __user *)addr;
 
 	if (kuap_is_disabled())
-		return 0;
+		return KUAP_NONE;
 
-	if (flags)
-		prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+	if (flags != KUAP_NONE) {
+		current->thread.kuap = KUAP_NONE;
+		kuap_lock(flags, true);
+	}
 
 	return flags;
 }
 
 static inline void restore_user_access(unsigned long flags)
 {
-	unsigned long addr = flags & 0xf0000000;
-	unsigned long end = flags << 28;
-	void __user *to = (__force void __user *)addr;
-
 	if (kuap_is_disabled())
 		return;
 
-	if (flags)
-		allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+	if (flags != KUAP_NONE) {
+		current->thread.kuap = flags;
+		kuap_unlock(flags, true);
+	}
 }
 
 static inline bool
 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-	unsigned long begin = regs->kuap & 0xf0000000;
-	unsigned long end = regs->kuap << 28;
+	unsigned long kuap = regs->kuap;
 
 	if (kuap_is_disabled())
 		return false;
 
-	return is_write && (address < begin || address >= end);
+	if (!is_write || kuap == KUAP_ALL)
+		return false;
+	if (kuap == KUAP_NONE)
+		return true;
+
+	/* If faulting address doesn't match unlocked segment, unlock all */
+	if ((kuap ^ address) & 0xf0000000)
+		regs->kuap = KUAP_ALL;
+
+	return false;
 }
 
 #endif /* CONFIG_PPC_KUAP */
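With thread.kuap now holding either KUAP_NONE, KUAP_ALL or the user address whose 256MB segment is currently unlocked, the reworked bad_kuap_fault() no longer range-checks against a begin/end pair: a write fault is only bad when nothing is unlocked, and a fault in a segment other than the unlocked one records KUAP_ALL in regs->kuap (so the window is widened on return) rather than being reported as a violation. Below is a minimal user-space sketch of just that decision logic, with hypothetical names and regs->kuap modelled as a plain pointer; it is not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define KUAP_NONE	(~0UL)
#define KUAP_ALL	(~1UL)

/* Mirrors the bad_kuap_fault() policy above: a write fault is only bad
 * when no user segment is unlocked. */
static bool sketch_bad_kuap_fault(unsigned long *kuap, unsigned long address, bool is_write)
{
	if (!is_write || *kuap == KUAP_ALL)
		return false;
	if (*kuap == KUAP_NONE)
		return true;

	/* Fault in a different 256MB segment than the unlocked one:
	 * widen the unlocked window instead of flagging a violation. */
	if ((*kuap ^ address) & 0xf0000000)
		*kuap = KUAP_ALL;

	return false;
}

int main(void)
{
	unsigned long kuap = 0x10001000;	/* segment 1 currently unlocked */

	printf("%d\n", sketch_bad_kuap_fault(&kuap, 0x10002000, true));	/* 0: same segment */
	printf("%d\n", sketch_bad_kuap_fault(&kuap, 0x20002000, true));	/* 0: widens window */
	printf("%d\n", kuap == KUAP_ALL);					/* 1 */

	kuap = KUAP_NONE;
	printf("%d\n", sketch_bad_kuap_fault(&kuap, 0x10002000, true));	/* 1: nothing unlocked */
	return 0;
}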