Skip to content

Commit f9d4861

Browse files
wildea01 authored and Russell King committed
ARM: 7294/1: vectors: use gate_vma for vectors user mapping
The current user mapping for the vectors page is inserted as a `horrible hack vma' into each task via arch_setup_additional_pages. This causes problems with the MM subsystem and vm_normal_page, as described here: https://lkml.org/lkml/2012/1/14/55 Following the suggestion from Hugh in the above thread, this patch uses the gate_vma for the vectors user mapping, therefore consolidating the horrible hack VMAs into one. Acked-and-Tested-by: Nicolas Pitre <[email protected]> Signed-off-by: Will Deacon <[email protected]> Signed-off-by: Russell King <[email protected]>
1 parent 195864c commit f9d4861

File tree

4 files changed

+31
-42
lines changed

4 files changed

+31
-42
lines changed

arch/arm/include/asm/elf.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -130,8 +130,4 @@ struct mm_struct;
130130
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
131131
#define arch_randomize_brk arch_randomize_brk
132132

133-
extern int vectors_user_mapping(void);
134-
#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
135-
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
136-
137133
#endif

arch/arm/include/asm/mmu_context.h

Lines changed: 1 addition & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include <asm/cacheflush.h>
1919
#include <asm/cachetype.h>
2020
#include <asm/proc-fns.h>
21+
#include <asm-generic/mm_hooks.h>
2122

2223
void __check_kvm_seq(struct mm_struct *mm);
2324

@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
133134
#define deactivate_mm(tsk,mm) do { } while (0)
134135
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
135136

136-
/*
137-
* We are inserting a "fake" vma for the user-accessible vector page so
138-
* gdb and friends can get to it through ptrace and /proc/<pid>/mem.
139-
* But we also want to remove it before the generic code gets to see it
140-
* during process exit or the unmapping of it would cause total havoc.
141-
* (the macro is used as remove_vma() is static to mm/mmap.c)
142-
*/
143-
#define arch_exit_mmap(mm) \
144-
do { \
145-
struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
146-
if (high_vma) { \
147-
BUG_ON(high_vma->vm_next); /* it should be last */ \
148-
if (high_vma->vm_prev) \
149-
high_vma->vm_prev->vm_next = NULL; \
150-
else \
151-
mm->mmap = NULL; \
152-
rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
153-
mm->mmap_cache = NULL; \
154-
mm->map_count--; \
155-
remove_vma(high_vma); \
156-
} \
157-
} while (0)
158-
159-
static inline void arch_dup_mmap(struct mm_struct *oldmm,
160-
struct mm_struct *mm)
161-
{
162-
}
163-
164137
#endif

arch/arm/include/asm/page.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
151151
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
152152
extern void copy_page(void *to, const void *from);
153153

154+
#define __HAVE_ARCH_GATE_AREA 1
155+
154156
#ifdef CONFIG_ARM_LPAE
155157
#include <asm/pgtable-3level-types.h>
156158
#else

arch/arm/kernel/process.c

Lines changed: 28 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -526,22 +526,40 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
526526
#ifdef CONFIG_MMU
527527
/*
528528
* The vectors page is always readable from user space for the
529-
* atomic helpers and the signal restart code. Let's declare a mapping
530-
* for it so it is visible through ptrace and /proc/<pid>/mem.
529+
* atomic helpers and the signal restart code. Insert it into the
530+
* gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
531531
*/
532+
static struct vm_area_struct gate_vma;
532533

533-
int vectors_user_mapping(void)
534+
static int __init gate_vma_init(void)
534535
{
535-
struct mm_struct *mm = current->mm;
536-
return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
537-
VM_READ | VM_EXEC |
538-
VM_MAYREAD | VM_MAYEXEC |
539-
VM_ALWAYSDUMP | VM_RESERVED,
540-
NULL);
536+
gate_vma.vm_start = 0xffff0000;
537+
gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
538+
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
539+
gate_vma.vm_flags = VM_READ | VM_EXEC |
540+
VM_MAYREAD | VM_MAYEXEC |
541+
VM_ALWAYSDUMP;
542+
return 0;
543+
}
544+
arch_initcall(gate_vma_init);
545+
546+
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
547+
{
548+
return &gate_vma;
549+
}
550+
551+
int in_gate_area(struct mm_struct *mm, unsigned long addr)
552+
{
553+
return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
554+
}
555+
556+
int in_gate_area_no_mm(unsigned long addr)
557+
{
558+
return in_gate_area(NULL, addr);
541559
}
542560

543561
const char *arch_vma_name(struct vm_area_struct *vma)
544562
{
545-
return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
563+
return (vma == &gate_vma) ? "[vectors]" : NULL;
546564
}
547565
#endif

0 commit comments

Comments (0)