
Commit c88d715

kiryl authored and Ingo Molnar committed
x86/boot/64: Rewrite startup_64() in C
The patch rewrites most of the startup_64() logic in C. This is preparation for enabling 5-level paging.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 34bbb00 commit c88d715
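Editor's note: the core of the new C path is plain pointer arithmetic. The kernel may be running at a physical address other than the one it was linked for, so before the final page tables exist every symbol reference has to be rebased by hand. The sketch below is not part of the patch; it is a minimal user-space illustration of that rebasing, where START_KERNEL_MAP, TEXT_VADDR, PHYSADDR and fixup() are made-up stand-ins for __START_KERNEL_map, _text, the runtime physaddr and the patch's fixup_pointer().

#include <stdio.h>

/* Hypothetical numbers, purely for illustration; not the kernel's values. */
#define START_KERNEL_MAP 0xffffffff80000000UL  /* link-time virtual base     */
#define TEXT_VADDR       0xffffffff81000000UL  /* link-time address of _text */
#define PHYSADDR         0x0000000004000000UL  /* where the kernel landed    */

/* Mirrors the idea of the patch's fixup_pointer(): rebase a link-time
 * pointer value to the physical address where the object actually sits. */
static unsigned long fixup(unsigned long ptr)
{
	return ptr - TEXT_VADDR + PHYSADDR;
}

int main(void)
{
	/* load_delta as computed in __startup_64():
	 * physaddr - (_text - __START_KERNEL_map) */
	unsigned long load_delta = PHYSADDR - (TEXT_VADDR - START_KERNEL_MAP);

	printf("load_delta   = %#lx\n", load_delta);   /* 0x3000000 here */
	printf("fixup(_text) = %#lx\n", fixup(TEXT_VADDR));
	return 0;
}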

File tree

arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S

2 files changed: +87 -93 lines

arch/x86/kernel/head64.c

Lines changed: 84 additions & 1 deletion
@@ -35,9 +35,92 @@
  */
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
-static unsigned int __initdata next_early_pgt = 2;
+static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
+static void __init *fixup_pointer(void *ptr, unsigned long physaddr)
+{
+	return ptr - (void *)_text + (void *)physaddr;
+}
+
+void __init __startup_64(unsigned long physaddr)
+{
+	unsigned long load_delta, *p;
+	pgdval_t *pgd;
+	pudval_t *pud;
+	pmdval_t *pmd, pmd_entry;
+	int i;
+
+	/* Is the address too large? */
+	if (physaddr >> MAX_PHYSMEM_BITS)
+		for (;;);
+
+	/*
+	 * Compute the delta between the address I am compiled to run at
+	 * and the address I am actually running at.
+	 */
+	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+
+	/* Is the address not 2M aligned? */
+	if (load_delta & ~PMD_PAGE_MASK)
+		for (;;);
+
+	/* Fixup the physical addresses in the page table */
+
+	pgd = fixup_pointer(&early_level4_pgt, physaddr);
+	pgd[pgd_index(__START_KERNEL_map)] += load_delta;
+
+	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
+	pud[510] += load_delta;
+	pud[511] += load_delta;
+
+	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
+	pmd[506] += load_delta;
+
+	/*
+	 * Set up the identity mapping for the switchover.  These
+	 * entries should *NOT* have the global bit set!  This also
+	 * creates a bunch of nonsense entries but that is fine --
+	 * it avoids problems around wraparound.
+	 */
+
+	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+
+	i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+	pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
+	pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+
+	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
+	pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
+	pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
+
+	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+	pmd_entry += physaddr;
+
+	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
+		pmd[idx] = pmd_entry + i * PMD_SIZE;
+	}
+
+	/*
+	 * Fixup the kernel text+data virtual addresses. Note that
+	 * we might write invalid pmds, when the kernel is relocated
+	 * cleanup_highmap() fixes this up along with the mappings
+	 * beyond _end.
+	 */
+
+	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		if (pmd[i] & _PAGE_PRESENT)
+			pmd[i] += load_delta;
+	}
+
+	/* Fixup phys_base */
+	p = fixup_pointer(&phys_base, physaddr);
+	*p += load_delta;
+}
+
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
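Editor's note on the identity-mapping index arithmetic above: the sketch below is not part of the patch. It reproduces the index computation in user space, assuming the standard 4-level x86-64 shifts (PGDIR 39, PUD 30, PMD 21) and 512 entries per table, to show which slots the hypothetical physaddr from the earlier sketch would select.

#include <stdio.h>

/* Standard 4-level x86-64 paging parameters (assumed here for illustration). */
#define PGDIR_SHIFT   39
#define PUD_SHIFT     30
#define PMD_SHIFT     21
#define PTRS_PER_PGD  512
#define PTRS_PER_PUD  512
#define PTRS_PER_PMD  512

int main(void)
{
	unsigned long physaddr = 0x0000000004000000UL;  /* hypothetical load address */

	/* Same index math __startup_64() uses when wiring the identity
	 * mapping into the early_dynamic_pgts pages. */
	printf("pgd index: %lu\n", (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD);
	printf("pud index: %lu\n", (physaddr >> PUD_SHIFT) % PTRS_PER_PUD);
	printf("pmd index: %lu\n", (physaddr >> PMD_SHIFT) % PTRS_PER_PMD);
	return 0;
}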

arch/x86/kernel/head_64.S

Lines changed: 3 additions & 92 deletions
@@ -72,100 +72,11 @@ startup_64:
 	/* Sanitize CPU configuration */
 	call verify_cpu
 
-	/*
-	 * Compute the delta between the address I am compiled to run at and the
-	 * address I am actually running at.
-	 */
-	leaq	_text(%rip), %rbp
-	subq	$_text - __START_KERNEL_map, %rbp
-
-	/* Is the address not 2M aligned? */
-	testl	$~PMD_PAGE_MASK, %ebp
-	jnz	bad_address
-
-	/*
-	 * Is the address too large?
-	 */
-	leaq	_text(%rip), %rax
-	shrq	$MAX_PHYSMEM_BITS, %rax
-	jnz	bad_address
-
-	/*
-	 * Fixup the physical addresses in the page table
-	 */
-	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
-
-	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
-	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)
-
-	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
-
-	/*
-	 * Set up the identity mapping for the switchover.  These
-	 * entries should *NOT* have the global bit set!  This also
-	 * creates a bunch of nonsense entries but that is fine --
-	 * it avoids problems around wraparound.
-	 */
 	leaq	_text(%rip), %rdi
-	leaq	early_level4_pgt(%rip), %rbx
-
-	movq	%rdi, %rax
-	shrq	$PGDIR_SHIFT, %rax
-
-	leaq	(PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
-	movq	%rdx, 0(%rbx,%rax,8)
-	movq	%rdx, 8(%rbx,%rax,8)
-
-	addq	$PAGE_SIZE, %rdx
-	movq	%rdi, %rax
-	shrq	$PUD_SHIFT, %rax
-	andl	$(PTRS_PER_PUD-1), %eax
-	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)
-	incl	%eax
-	andl	$(PTRS_PER_PUD-1), %eax
-	movq	%rdx, PAGE_SIZE(%rbx,%rax,8)
-
-	addq	$PAGE_SIZE * 2, %rbx
-	movq	%rdi, %rax
-	shrq	$PMD_SHIFT, %rdi
-	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
-	leaq	(_end - 1)(%rip), %rcx
-	shrq	$PMD_SHIFT, %rcx
-	subq	%rdi, %rcx
-	incl	%ecx
+	pushq	%rsi
+	call	__startup_64
+	popq	%rsi
 
-1:
-	andq	$(PTRS_PER_PMD - 1), %rdi
-	movq	%rax, (%rbx,%rdi,8)
-	incq	%rdi
-	addq	$PMD_SIZE, %rax
-	decl	%ecx
-	jnz	1b
-
-	test	%rbp, %rbp
-	jz	.Lskip_fixup
-
-	/*
-	 * Fixup the kernel text+data virtual addresses. Note that
-	 * we might write invalid pmds, when the kernel is relocated
-	 * cleanup_highmap() fixes this up along with the mappings
-	 * beyond _end.
-	 */
-	leaq	level2_kernel_pgt(%rip), %rdi
-	leaq	PAGE_SIZE(%rdi), %r8
-	/* See if it is a valid page table entry */
-1:	testb	$_PAGE_PRESENT, 0(%rdi)
-	jz	2f
-	addq	%rbp, 0(%rdi)
-	/* Go to the next page */
-2:	addq	$8, %rdi
-	cmp	%r8, %rdi
-	jne	1b
-
-	/* Fixup phys_base */
-	addq	%rbp, phys_base(%rip)
-
-.Lskip_fixup:
 	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
 	jmp	1f
 ENTRY(secondary_startup_64)
