12
12
#include <linux/binfmts.h>
13
13
#include <linux/err.h>
14
14
#include <asm/page.h>
15
+ #include <asm/vdso.h>
16
+
15
17
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
16
18
#include <vdso/datapage.h>
17
19
#else
18
- #include <asm/vdso.h>
20
/*
 * Fallback for !CONFIG_GENERIC_TIME_VSYSCALL: the generic vDSO data page
 * layout from <vdso/datapage.h> is unavailable, so provide an empty
 * placeholder type so the rest of this file can still refer to
 * struct vdso_data when sizing/mapping the data page.
 */
struct vdso_data {
};
19
22
#endif
20
23
21
24
extern char vdso_start [], vdso_end [];
22
25
26
/*
 * Page-offset indices of the vvar region mapped in front of the vDSO
 * image; VVAR_NR_PAGES doubles as the page count of that region.
 * NOTE(review): must stay in sync with __VVAR_PAGES (enforced elsewhere
 * by BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES)).
 */
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,	/* the vdso_data page */
	VVAR_NR_PAGES,		/* total number of vvar pages */
};

/* Size in bytes of the vvar region preceding the vDSO code pages. */
#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
32
+
23
33
static unsigned int vdso_pages __ro_after_init ;
24
34
static struct page * * vdso_pagelist __ro_after_init ;
25
35
@@ -38,7 +48,7 @@ static int __init vdso_init(void)
38
48
39
49
vdso_pages = (vdso_end - vdso_start ) >> PAGE_SHIFT ;
40
50
vdso_pagelist =
41
- kcalloc (vdso_pages + 1 , sizeof (struct page * ), GFP_KERNEL );
51
+ kcalloc (vdso_pages + VVAR_NR_PAGES , sizeof (struct page * ), GFP_KERNEL );
42
52
if (unlikely (vdso_pagelist == NULL )) {
43
53
pr_err ("vdso: pagelist allocation failed\n" );
44
54
return - ENOMEM ;
@@ -63,38 +73,41 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
63
73
unsigned long vdso_base , vdso_len ;
64
74
int ret ;
65
75
66
- vdso_len = (vdso_pages + 1 ) << PAGE_SHIFT ;
76
+ BUILD_BUG_ON (VVAR_NR_PAGES != __VVAR_PAGES );
77
+
78
+ vdso_len = (vdso_pages + VVAR_NR_PAGES ) << PAGE_SHIFT ;
79
+
80
+ if (mmap_write_lock_killable (mm ))
81
+ return - EINTR ;
67
82
68
- mmap_write_lock (mm );
69
83
vdso_base = get_unmapped_area (NULL , 0 , vdso_len , 0 , 0 );
70
84
if (IS_ERR_VALUE (vdso_base )) {
71
85
ret = vdso_base ;
72
86
goto end ;
73
87
}
74
88
75
- /*
76
- * Put vDSO base into mm struct. We need to do this before calling
77
- * install_special_mapping or the perf counter mmap tracking code
78
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
79
- */
80
- mm -> context .vdso = (void * )vdso_base ;
89
+ mm -> context .vdso = NULL ;
90
+ ret = install_special_mapping (mm , vdso_base , VVAR_SIZE ,
91
+ (VM_READ | VM_MAYREAD ), & vdso_pagelist [vdso_pages ]);
92
+ if (unlikely (ret ))
93
+ goto end ;
81
94
82
95
ret =
83
- install_special_mapping (mm , vdso_base , vdso_pages << PAGE_SHIFT ,
96
+ install_special_mapping (mm , vdso_base + VVAR_SIZE ,
97
+ vdso_pages << PAGE_SHIFT ,
84
98
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC ),
85
99
vdso_pagelist );
86
100
87
- if (unlikely (ret )) {
88
- mm -> context .vdso = NULL ;
101
+ if (unlikely (ret ))
89
102
goto end ;
90
- }
91
103
92
- vdso_base += (vdso_pages << PAGE_SHIFT );
93
- ret = install_special_mapping (mm , vdso_base , PAGE_SIZE ,
94
- (VM_READ | VM_MAYREAD ), & vdso_pagelist [vdso_pages ]);
104
+ /*
105
+ * Put vDSO base into mm struct. We need to do this before calling
106
+ * install_special_mapping or the perf counter mmap tracking code
107
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
108
+ */
109
+ mm -> context .vdso = (void * )vdso_base + VVAR_SIZE ;
95
110
96
- if (unlikely (ret ))
97
- mm -> context .vdso = NULL ;
98
111
end :
99
112
mmap_write_unlock (mm );
100
113
return ret ;
@@ -105,7 +118,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
105
118
if (vma -> vm_mm && (vma -> vm_start == (long )vma -> vm_mm -> context .vdso ))
106
119
return "[vdso]" ;
107
120
if (vma -> vm_mm && (vma -> vm_start ==
108
- (long )vma -> vm_mm -> context .vdso + PAGE_SIZE ))
121
+ (long )vma -> vm_mm -> context .vdso - VVAR_SIZE ))
109
122
return "[vdso_data]" ;
110
123
return NULL ;
111
124
}
0 commit comments