Skip to content

Commit a050ba1

Browse files
committed
mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
This does the simple pattern conversion of alpha, arc, csky, hexagon, loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma() helper. They all have the regular fault handling pattern without odd special cases. The remaining architectures all have something that keeps us from a straightforward conversion: ia64 and parisc have stacks that can grow both up as well as down (and ia64 has special address region checks). And m68k, microblaze, openrisc, sparc64, and um end up having extra rules about only expanding the stack down a limited amount below the user space stack pointer. That is something that x86 used to do too (long long ago), and it probably could just be skipped, but it still makes the conversion less than trivial. Note that this conversion was done manually and with the exception of alpha without any build testing, because I have a fairly limited cross- building environment. The cases are all simple, and I went through the changes several times, but... Signed-off-by: Linus Torvalds <[email protected]>
1 parent 8b35ca3 commit a050ba1

File tree

18 files changed

+45
-124
lines changed

18 files changed

+45
-124
lines changed

arch/alpha/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ config ALPHA
3030
select HAS_IOPORT
3131
select HAVE_ARCH_AUDITSYSCALL
3232
select HAVE_MOD_ARCH_SPECIFIC
33+
select LOCK_MM_AND_FIND_VMA
3334
select MODULES_USE_ELF_RELA
3435
select ODD_RT_SIGACTION
3536
select OLD_SIGSUSPEND

arch/alpha/mm/fault.c

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
119119
flags |= FAULT_FLAG_USER;
120120
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
121121
retry:
122-
mmap_read_lock(mm);
123-
vma = find_vma(mm, address);
122+
vma = lock_mm_and_find_vma(mm, address, regs);
124123
if (!vma)
125-
goto bad_area;
126-
if (vma->vm_start <= address)
127-
goto good_area;
128-
if (!(vma->vm_flags & VM_GROWSDOWN))
129-
goto bad_area;
130-
if (expand_stack(vma, address))
131-
goto bad_area;
124+
goto bad_area_nosemaphore;
132125

133126
/* Ok, we have a good vm_area for this memory access, so
134127
we can handle it. */
135-
good_area:
136128
si_code = SEGV_ACCERR;
137129
if (cause < 0) {
138130
if (!(vma->vm_flags & VM_EXEC))
@@ -192,6 +184,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
192184
bad_area:
193185
mmap_read_unlock(mm);
194186

187+
bad_area_nosemaphore:
195188
if (user_mode(regs))
196189
goto do_sigsegv;
197190

arch/arc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ config ARC
4141
select HAVE_PERF_EVENTS
4242
select HAVE_SYSCALL_TRACEPOINTS
4343
select IRQ_DOMAIN
44+
select LOCK_MM_AND_FIND_VMA
4445
select MODULES_USE_ELF_RELA
4546
select OF
4647
select OF_EARLY_FLATTREE

arch/arc/mm/fault.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
113113

114114
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
115115
retry:
116-
mmap_read_lock(mm);
117-
118-
vma = find_vma(mm, address);
116+
vma = lock_mm_and_find_vma(mm, address, regs);
119117
if (!vma)
120-
goto bad_area;
121-
if (unlikely(address < vma->vm_start)) {
122-
if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
123-
goto bad_area;
124-
}
118+
goto bad_area_nosemaphore;
125119

126120
/*
127121
* vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
161155
bad_area:
162156
mmap_read_unlock(mm);
163157

158+
bad_area_nosemaphore:
164159
/*
165160
* Major/minor page fault accounting
166161
* (in case of retry we only land here once)

arch/csky/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ config CSKY
9696
select HAVE_REGS_AND_STACK_ACCESS_API
9797
select HAVE_STACKPROTECTOR
9898
select HAVE_SYSCALL_TRACEPOINTS
99+
select LOCK_MM_AND_FIND_VMA
99100
select MAY_HAVE_SPARSE_IRQ
100101
select MODULES_USE_ELF_RELA if MODULES
101102
select OF

arch/csky/mm/fault.c

Lines changed: 5 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
9797
BUG();
9898
}
9999

100-
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
100+
static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
101101
{
102102
/*
103103
* Something tried to access memory that isn't in our memory map.
104104
* Fix it, but check if it's kernel or user first.
105105
*/
106-
mmap_read_unlock(mm);
107106
/* User mode accesses just cause a SIGSEGV */
108107
if (user_mode(regs)) {
109108
do_trap(regs, SIGSEGV, code, addr);
@@ -238,32 +237,21 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
238237
if (is_write(regs))
239238
flags |= FAULT_FLAG_WRITE;
240239
retry:
241-
mmap_read_lock(mm);
242-
vma = find_vma(mm, addr);
240+
vma = lock_mm_and_find_vma(mm, address, regs);
243241
if (unlikely(!vma)) {
244-
bad_area(regs, mm, code, addr);
245-
return;
246-
}
247-
if (likely(vma->vm_start <= addr))
248-
goto good_area;
249-
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
250-
bad_area(regs, mm, code, addr);
251-
return;
252-
}
253-
if (unlikely(expand_stack(vma, addr))) {
254-
bad_area(regs, mm, code, addr);
242+
bad_area_nosemaphore(regs, mm, code, addr);
255243
return;
256244
}
257245

258246
/*
259247
* Ok, we have a good vm_area for this memory access, so
260248
* we can handle it.
261249
*/
262-
good_area:
263250
code = SEGV_ACCERR;
264251

265252
if (unlikely(access_error(regs, vma))) {
266-
bad_area(regs, mm, code, addr);
253+
mmap_read_unlock(mm);
254+
bad_area_nosemaphore(regs, mm, code, addr);
267255
return;
268256
}
269257

arch/hexagon/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@ config HEXAGON
2828
select GENERIC_SMP_IDLE_THREAD
2929
select STACKTRACE_SUPPORT
3030
select GENERIC_CLOCKEVENTS_BROADCAST
31+
select LOCK_MM_AND_FIND_VMA
3132
select MODULES_USE_ELF_RELA
3233
select GENERIC_CPU_DEVICES
3334
select ARCH_WANT_LD_ORPHAN_WARN

arch/hexagon/mm/vm_fault.c

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
5757

5858
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
5959
retry:
60-
mmap_read_lock(mm);
61-
vma = find_vma(mm, address);
62-
if (!vma)
63-
goto bad_area;
60+
vma = lock_mm_and_find_vma(mm, address, regs);
61+
if (unlikely(!vma))
62+
goto bad_area_nosemaphore;
6463

65-
if (vma->vm_start <= address)
66-
goto good_area;
67-
68-
if (!(vma->vm_flags & VM_GROWSDOWN))
69-
goto bad_area;
70-
71-
if (expand_stack(vma, address))
72-
goto bad_area;
73-
74-
good_area:
7564
/* Address space is OK. Now check access rights. */
7665
si_code = SEGV_ACCERR;
7766

@@ -143,6 +132,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
143132
bad_area:
144133
mmap_read_unlock(mm);
145134

135+
bad_area_nosemaphore:
146136
if (user_mode(regs)) {
147137
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
148138
return;

arch/loongarch/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -130,6 +130,7 @@ config LOONGARCH
130130
select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
131131
select IRQ_FORCED_THREADING
132132
select IRQ_LOONGARCH_CPU
133+
select LOCK_MM_AND_FIND_VMA
133134
select MMU_GATHER_MERGE_VMAS if MMU
134135
select MODULES_USE_ELF_RELA if MODULES
135136
select NEED_PER_CPU_EMBED_FIRST_CHUNK

arch/loongarch/mm/fault.c

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -169,22 +169,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
169169

170170
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
171171
retry:
172-
mmap_read_lock(mm);
173-
vma = find_vma(mm, address);
174-
if (!vma)
175-
goto bad_area;
176-
if (vma->vm_start <= address)
177-
goto good_area;
178-
if (!(vma->vm_flags & VM_GROWSDOWN))
179-
goto bad_area;
180-
if (!expand_stack(vma, address))
181-
goto good_area;
172+
vma = lock_mm_and_find_vma(mm, address, regs);
173+
if (unlikely(!vma))
174+
goto bad_area_nosemaphore;
175+
goto good_area;
176+
182177
/*
183178
* Something tried to access memory that isn't in our memory map..
184179
* Fix it, but check if it's kernel or user first..
185180
*/
186181
bad_area:
187182
mmap_read_unlock(mm);
183+
bad_area_nosemaphore:
188184
do_sigsegv(regs, write, address, si_code);
189185
return;
190186

arch/nios2/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ config NIOS2
1616
select HAVE_ARCH_TRACEHOOK
1717
select HAVE_ARCH_KGDB
1818
select IRQ_DOMAIN
19+
select LOCK_MM_AND_FIND_VMA
1920
select MODULES_USE_ELF_RELA
2021
select OF
2122
select OF_EARLY_FLATTREE

arch/nios2/mm/fault.c

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
8686

8787
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
8888

89-
if (!mmap_read_trylock(mm)) {
90-
if (!user_mode(regs) && !search_exception_tables(regs->ea))
91-
goto bad_area_nosemaphore;
9289
retry:
93-
mmap_read_lock(mm);
94-
}
95-
96-
vma = find_vma(mm, address);
90+
vma = lock_mm_and_find_vma(mm, address, regs);
9791
if (!vma)
98-
goto bad_area;
99-
if (vma->vm_start <= address)
100-
goto good_area;
101-
if (!(vma->vm_flags & VM_GROWSDOWN))
102-
goto bad_area;
103-
if (expand_stack(vma, address))
104-
goto bad_area;
92+
goto bad_area_nosemaphore;
10593
/*
10694
* Ok, we have a good vm_area for this memory access, so
10795
* we can handle it..
10896
*/
109-
good_area:
11097
code = SEGV_ACCERR;
11198

11299
switch (cause) {

arch/sh/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ config SUPERH
5959
select HAVE_STACKPROTECTOR
6060
select HAVE_SYSCALL_TRACEPOINTS
6161
select IRQ_FORCED_THREADING
62+
select LOCK_MM_AND_FIND_VMA
6263
select MODULES_USE_ELF_RELA
6364
select NEED_SG_DMA_LENGTH
6465
select NO_DMA if !MMU && !DMA_COHERENT

arch/sh/mm/fault.c

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -439,29 +439,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
439439
}
440440

441441
retry:
442-
mmap_read_lock(mm);
443-
444-
vma = find_vma(mm, address);
442+
vma = lock_mm_and_find_vma(mm, address, regs);
445443
if (unlikely(!vma)) {
446-
bad_area(regs, error_code, address);
447-
return;
448-
}
449-
if (likely(vma->vm_start <= address))
450-
goto good_area;
451-
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
452-
bad_area(regs, error_code, address);
453-
return;
454-
}
455-
if (unlikely(expand_stack(vma, address))) {
456-
bad_area(regs, error_code, address);
444+
bad_area_nosemaphore(regs, error_code, address);
457445
return;
458446
}
459447

460448
/*
461449
* Ok, we have a good vm_area for this memory access, so
462450
* we can handle it..
463451
*/
464-
good_area:
465452
if (unlikely(access_error(error_code, vma))) {
466453
bad_area_access_error(regs, error_code, address);
467454
return;

arch/sparc/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,7 @@ config SPARC32
5757
select DMA_DIRECT_REMAP
5858
select GENERIC_ATOMIC64
5959
select HAVE_UID16
60+
select LOCK_MM_AND_FIND_VMA
6061
select OLD_SIGACTION
6162
select ZONE_DMA
6263

arch/sparc/mm/fault_32.c

Lines changed: 8 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
143143
if (pagefault_disabled() || !mm)
144144
goto no_context;
145145

146+
if (!from_user && address >= PAGE_OFFSET)
147+
goto no_context;
148+
146149
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
147150

148151
retry:
149-
mmap_read_lock(mm);
150-
151-
if (!from_user && address >= PAGE_OFFSET)
152-
goto bad_area;
153-
154-
vma = find_vma(mm, address);
152+
vma = lock_mm_and_find_vma(mm, address, regs);
155153
if (!vma)
156-
goto bad_area;
157-
if (vma->vm_start <= address)
158-
goto good_area;
159-
if (!(vma->vm_flags & VM_GROWSDOWN))
160-
goto bad_area;
161-
if (expand_stack(vma, address))
162-
goto bad_area;
154+
goto bad_area_nosemaphore;
163155
/*
164156
* Ok, we have a good vm_area for this memory access, so
165157
* we can handle it..
166158
*/
167-
good_area:
168159
code = SEGV_ACCERR;
169160
if (write) {
170161
if (!(vma->vm_flags & VM_WRITE))
@@ -321,17 +312,9 @@ static void force_user_fault(unsigned long address, int write)
321312

322313
code = SEGV_MAPERR;
323314

324-
mmap_read_lock(mm);
325-
vma = find_vma(mm, address);
315+
vma = lock_mm_and_find_vma(mm, address, regs);
326316
if (!vma)
327-
goto bad_area;
328-
if (vma->vm_start <= address)
329-
goto good_area;
330-
if (!(vma->vm_flags & VM_GROWSDOWN))
331-
goto bad_area;
332-
if (expand_stack(vma, address))
333-
goto bad_area;
334-
good_area:
317+
goto bad_area_nosemaphore;
335318
code = SEGV_ACCERR;
336319
if (write) {
337320
if (!(vma->vm_flags & VM_WRITE))
@@ -350,6 +333,7 @@ static void force_user_fault(unsigned long address, int write)
350333
return;
351334
bad_area:
352335
mmap_read_unlock(mm);
336+
bad_area_nosemaphore:
353337
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
354338
return;
355339

arch/xtensa/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ config XTENSA
4949
select HAVE_SYSCALL_TRACEPOINTS
5050
select HAVE_VIRT_CPU_ACCOUNTING_GEN
5151
select IRQ_DOMAIN
52+
select LOCK_MM_AND_FIND_VMA
5253
select MODULES_USE_ELF_RELA
5354
select PERF_USE_VMALLOC
5455
select TRACE_IRQFLAGS_SUPPORT

0 commit comments

Comments (0)