@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm = tsk->mm;
@@ -248,7 +242,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 		 * starvation.
 		 */
-		flags &= ~FAULT_FLAG_ALLOW_RETRY;
+		mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
 	}
 }
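Net effect of the change: the ESR bits are decoded exactly once in do_page_fault() into a VMA permission mask (vm_flags) and the handle_mm_fault() flags (mm_flags), so __do_page_fault() no longer needs the ESR and the old access_error() helper goes away. The sketch below is a standalone, userspace restatement of that decode step, not kernel code: the VM_* and FAULT_FLAG_* constants are stubbed with arbitrary bit values purely so it compiles and runs outside the tree, while the decode logic itself mirrors the hunk above.

/* Userspace illustration of the ESR decode introduced by this patch.
 * Constant values below are stubs for illustration only.
 */
#include <stdio.h>

#define ESR_WRITE		(1 << 6)
#define ESR_CM			(1 << 8)	/* cache maintenance operation */
#define ESR_LNX_EXEC		(1 << 24)

#define VM_READ			0x1UL		/* stub values, not the kernel's */
#define VM_WRITE		0x2UL
#define VM_EXEC			0x4UL
#define FAULT_FLAG_WRITE	0x1U
#define FAULT_FLAG_ALLOW_RETRY	0x2U
#define FAULT_FLAG_KILLABLE	0x4U

static void decode_esr(unsigned int esr, unsigned long *vm_flags,
		       unsigned int *mm_flags)
{
	/* Default: any of read/write/exec permission satisfies the fault. */
	*vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	*mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (esr & ESR_LNX_EXEC) {
		*vm_flags = VM_EXEC;
	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
		/* A cache maintenance fault reports as a write but is not
		 * treated as one here. */
		*vm_flags = VM_WRITE;
		*mm_flags |= FAULT_FLAG_WRITE;
	}
}

int main(void)
{
	unsigned long vm_flags;
	unsigned int mm_flags;

	decode_esr(ESR_WRITE, &vm_flags, &mm_flags);
	printf("write fault:       vm_flags=%#lx mm_flags=%#x\n", vm_flags, mm_flags);

	decode_esr(ESR_WRITE | ESR_CM, &vm_flags, &mm_flags);
	printf("cache maintenance: vm_flags=%#lx mm_flags=%#x\n", vm_flags, mm_flags);

	/* The VMA permission check in __do_page_fault() then reduces to:
	 *	if (!(vma->vm_flags & vm_flags))
	 *		fault = VM_FAULT_BADACCESS;
	 */
	return 0;
}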