Skip to content

Commit 1b6ba46

Browse files
committed
ARM: LPAE: MMU setup for the 3-level page table format
This patch adds the MMU initialisation for the LPAE page table format. The swapper_pg_dir size with LPAE is 5 rather than 4 pages. A new proc-v7-3level.S file contains the TTB initialisation, context switch and PTE setting code for the LPAE page table format. The TTBRx split is based on the PAGE_OFFSET, with TTBR1 used for the kernel mappings. The 36-bit mappings (supersections) and a few other memory types in mmu.c are conditionally compiled. Signed-off-by: Catalin Marinas <[email protected]>
1 parent da02877 commit 1b6ba46

File tree

5 files changed

+243
-12
lines changed

5 files changed

+243
-12
lines changed

arch/arm/kernel/head.S

Lines changed: 43 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -39,8 +39,14 @@
3939
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
4040
#endif
4141

42+
#ifdef CONFIG_ARM_LPAE
43+
/* LPAE requires an additional page for the PGD */
44+
#define PG_DIR_SIZE 0x5000
45+
#define PMD_ORDER 3
46+
#else
4247
#define PG_DIR_SIZE 0x4000
4348
#define PMD_ORDER 2
49+
#endif
4450

4551
.globl swapper_pg_dir
4652
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
@@ -164,6 +170,25 @@ __create_page_tables:
164170
teq r0, r6
165171
bne 1b
166172

173+
#ifdef CONFIG_ARM_LPAE
174+
/*
175+
* Build the PGD table (first level) to point to the PMD table. A PGD
176+
* entry is 64-bit wide.
177+
*/
178+
mov r0, r4
179+
add r3, r4, #0x1000 @ first PMD table address
180+
orr r3, r3, #3 @ PGD block type
181+
mov r6, #4 @ PTRS_PER_PGD
182+
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
183+
1: str r3, [r0], #4 @ set bottom PGD entry bits
184+
str r7, [r0], #4 @ set top PGD entry bits
185+
add r3, r3, #0x1000 @ next PMD table
186+
subs r6, r6, #1
187+
bne 1b
188+
189+
add r4, r4, #0x1000 @ point to the PMD tables
190+
#endif
191+
167192
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
168193

169194
/*
@@ -219,8 +244,8 @@ __create_page_tables:
219244
#endif
220245

221246
/*
222-
* Then map boot params address in r2 or
223-
* the first 1MB of ram if boot params address is not specified.
247+
* Then map boot params address in r2 or the first 1MB (2MB with LPAE)
248+
* of ram if boot params address is not specified.
224249
*/
225250
mov r0, r2, lsr #SECTION_SHIFT
226251
movs r0, r0, lsl #SECTION_SHIFT
@@ -251,7 +276,15 @@ __create_page_tables:
251276
mov r3, r7, lsr #SECTION_SHIFT
252277
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
253278
orr r3, r7, r3, lsl #SECTION_SHIFT
279+
#ifdef CONFIG_ARM_LPAE
280+
mov r7, #1 << (54 - 32) @ XN
281+
#else
282+
orr r3, r3, #PMD_SECT_XN
283+
#endif
254284
1: str r3, [r0], #4
285+
#ifdef CONFIG_ARM_LPAE
286+
str r7, [r0], #4
287+
#endif
255288
add r3, r3, #1 << SECTION_SHIFT
256289
cmp r0, r6
257290
blo 1b
@@ -282,6 +315,9 @@ __create_page_tables:
282315
add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
283316
str r3, [r0]
284317
#endif
318+
#endif
319+
#ifdef CONFIG_ARM_LPAE
320+
sub r4, r4, #0x1000 @ point to the PGD table
285321
#endif
286322
mov pc, lr
287323
ENDPROC(__create_page_tables)
@@ -374,12 +410,17 @@ __enable_mmu:
374410
#ifdef CONFIG_CPU_ICACHE_DISABLE
375411
bic r0, r0, #CR_I
376412
#endif
413+
#ifdef CONFIG_ARM_LPAE
414+
mov r5, #0
415+
mcrr p15, 0, r4, r5, c2 @ load TTBR0
416+
#else
377417
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
378418
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
379419
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
380420
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
381421
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
382422
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
423+
#endif
383424
b __turn_mmu_on
384425
ENDPROC(__enable_mmu)
385426

arch/arm/mm/mmu.c

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,7 @@ static int __init early_nowrite(char *__unused)
150150
}
151151
early_param("nowb", early_nowrite);
152152

153+
#ifndef CONFIG_ARM_LPAE
153154
static int __init early_ecc(char *p)
154155
{
155156
if (memcmp(p, "on", 2) == 0)
@@ -159,6 +160,7 @@ static int __init early_ecc(char *p)
159160
return 0;
160161
}
161162
early_param("ecc", early_ecc);
163+
#endif
162164

163165
static int __init noalign_setup(char *__unused)
164166
{
@@ -228,10 +230,12 @@ static struct mem_type mem_types[] = {
228230
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
229231
.domain = DOMAIN_KERNEL,
230232
},
233+
#ifndef CONFIG_ARM_LPAE
231234
[MT_MINICLEAN] = {
232235
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
233236
.domain = DOMAIN_KERNEL,
234237
},
238+
#endif
235239
[MT_LOW_VECTORS] = {
236240
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
237241
L_PTE_RDONLY,
@@ -429,13 +433,15 @@ static void __init build_mem_type_table(void)
429433
* ARMv6 and above have extended page tables.
430434
*/
431435
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
436+
#ifndef CONFIG_ARM_LPAE
432437
/*
433438
* Mark cache clean areas and XIP ROM read only
434439
* from SVC mode and no access from userspace.
435440
*/
436441
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
437442
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
438443
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
444+
#endif
439445

440446
if (is_smp()) {
441447
/*
@@ -474,6 +480,18 @@ static void __init build_mem_type_table(void)
474480
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
475481
}
476482

483+
#ifdef CONFIG_ARM_LPAE
484+
/*
485+
* Do not generate access flag faults for the kernel mappings.
486+
*/
487+
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
488+
mem_types[i].prot_pte |= PTE_EXT_AF;
489+
mem_types[i].prot_sect |= PMD_SECT_AF;
490+
}
491+
kern_pgprot |= PTE_EXT_AF;
492+
vecs_pgprot |= PTE_EXT_AF;
493+
#endif
494+
477495
for (i = 0; i < 16; i++) {
478496
unsigned long v = pgprot_val(protection_map[i]);
479497
protection_map[i] = __pgprot(v | user_pgprot);
@@ -572,8 +590,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
572590
if (((addr | end | phys) & ~SECTION_MASK) == 0) {
573591
pmd_t *p = pmd;
574592

593+
#ifndef CONFIG_ARM_LPAE
575594
if (addr & SECTION_SIZE)
576595
pmd++;
596+
#endif
577597

578598
do {
579599
*pmd = __pmd(phys | type->prot_sect);
@@ -603,6 +623,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
603623
} while (pud++, addr = next, addr != end);
604624
}
605625

626+
#ifndef CONFIG_ARM_LPAE
606627
static void __init create_36bit_mapping(struct map_desc *md,
607628
const struct mem_type *type)
608629
{
@@ -662,6 +683,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
662683
pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
663684
} while (addr != end);
664685
}
686+
#endif /* !CONFIG_ARM_LPAE */
665687

666688
/*
667689
* Create the page directory entries and any necessary
@@ -693,13 +715,15 @@ static void __init create_mapping(struct map_desc *md)
693715

694716
type = &mem_types[md->type];
695717

718+
#ifndef CONFIG_ARM_LPAE
696719
/*
697720
* Catch 36-bit addresses
698721
*/
699722
if (md->pfn >= 0x100000) {
700723
create_36bit_mapping(md, type);
701724
return;
702725
}
726+
#endif
703727

704728
addr = md->virtual & PAGE_MASK;
705729
phys = __pfn_to_phys(md->pfn);
@@ -897,7 +921,13 @@ static inline void prepare_page_table(void)
897921
pmd_clear(pmd_off_k(addr));
898922
}
899923

924+
#ifdef CONFIG_ARM_LPAE
925+
/* the first page is reserved for pgd */
926+
#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
927+
PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
928+
#else
900929
#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
930+
#endif
901931

902932
/*
903933
* Reserve the special regions of memory

arch/arm/mm/proc-macros.S

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,8 +91,9 @@
9191
#if L_PTE_SHARED != PTE_EXT_SHARED
9292
#error PTE shared bit mismatch
9393
#endif
94-
#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
95-
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
94+
#if !defined (CONFIG_ARM_LPAE) && \
95+
(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
96+
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
9697
#error Invalid Linux PTE bit settings
9798
#endif
9899
#endif /* CONFIG_MMU */

arch/arm/mm/proc-v7-3level.S

Lines changed: 150 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,150 @@
1+
/*
2+
* arch/arm/mm/proc-v7-3level.S
3+
*
4+
* Copyright (C) 2001 Deep Blue Solutions Ltd.
5+
* Copyright (C) 2011 ARM Ltd.
6+
* Author: Catalin Marinas <[email protected]>
7+
* based on arch/arm/mm/proc-v7-2level.S
8+
*
9+
* This program is free software; you can redistribute it and/or modify
10+
* it under the terms of the GNU General Public License version 2 as
11+
* published by the Free Software Foundation.
12+
*
13+
* This program is distributed in the hope that it will be useful,
14+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
15+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16+
* GNU General Public License for more details.
17+
*
18+
* You should have received a copy of the GNU General Public License
19+
* along with this program; if not, write to the Free Software
20+
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21+
*/
22+
23+
#define TTB_IRGN_NC (0 << 8)
24+
#define TTB_IRGN_WBWA (1 << 8)
25+
#define TTB_IRGN_WT (2 << 8)
26+
#define TTB_IRGN_WB (3 << 8)
27+
#define TTB_RGN_NC (0 << 10)
28+
#define TTB_RGN_OC_WBWA (1 << 10)
29+
#define TTB_RGN_OC_WT (2 << 10)
30+
#define TTB_RGN_OC_WB (3 << 10)
31+
#define TTB_S (3 << 12)
32+
#define TTB_EAE (1 << 31)
33+
34+
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
35+
#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB)
36+
#define PMD_FLAGS_UP (PMD_SECT_WB)
37+
38+
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
39+
#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
40+
#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
41+
42+
/*
43+
* cpu_v7_switch_mm(pgd_phys, tsk)
44+
*
45+
* Set the translation table base pointer to be pgd_phys (physical address of
46+
* the new TTB).
47+
*/
48+
ENTRY(cpu_v7_switch_mm)
49+
#ifdef CONFIG_MMU
50+
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
51+
and r3, r1, #0xff
52+
mov r3, r3, lsl #(48 - 32) @ ASID
53+
mcrr p15, 0, r0, r3, c2 @ set TTB 0
54+
isb
55+
#endif
56+
mov pc, lr
57+
ENDPROC(cpu_v7_switch_mm)
58+
59+
/*
60+
* cpu_v7_set_pte_ext(ptep, pte)
61+
*
62+
* Set a level 2 translation table entry.
63+
* - ptep - pointer to level 3 translation table entry
64+
* - pte - PTE value to store (64-bit in r2 and r3)
65+
*/
66+
ENTRY(cpu_v7_set_pte_ext)
67+
#ifdef CONFIG_MMU
68+
tst r2, #L_PTE_PRESENT
69+
beq 1f
70+
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
71+
orreq r2, #L_PTE_RDONLY
72+
1: strd r2, r3, [r0]
73+
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
74+
#endif
75+
mov pc, lr
76+
ENDPROC(cpu_v7_set_pte_ext)
77+
78+
/*
79+
* Memory region attributes for LPAE (defined in pgtable-3level.h):
80+
*
81+
* n = AttrIndx[2:0]
82+
*
83+
* n MAIR
84+
* UNCACHED 000 00000000
85+
* BUFFERABLE 001 01000100
86+
* DEV_WC 001 01000100
87+
* WRITETHROUGH 010 10101010
88+
* WRITEBACK 011 11101110
89+
* DEV_CACHED 011 11101110
90+
* DEV_SHARED 100 00000100
91+
* DEV_NONSHARED 100 00000100
92+
* unused 101
93+
* unused 110
94+
* WRITEALLOC 111 11111111
95+
*/
96+
.equ PRRR, 0xeeaa4400 @ MAIR0
97+
.equ NMRR, 0xff000004 @ MAIR1
98+
99+
/*
100+
* Macro for setting up the TTBRx and TTBCR registers.
101+
* - \ttbr1 updated.
102+
*/
103+
.macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
104+
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
105+
cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below)
106+
mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
107+
orr \tmp, \tmp, #TTB_EAE
108+
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
109+
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
110+
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
111+
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
112+
/*
113+
* TTBR0/TTBR1 split (PAGE_OFFSET):
114+
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
115+
* 0x80000000: T0SZ = 0, T1SZ = 1
116+
* 0xc0000000: T0SZ = 0, T1SZ = 2
117+
*
118+
* Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
119+
* booting secondary CPUs would end up using TTBR1 for the identity
120+
* mapping set up in TTBR0.
121+
*/
122+
bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET?
123+
orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
124+
#if defined CONFIG_VMSPLIT_2G
125+
/* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
126+
add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries
127+
#elif defined CONFIG_VMSPLIT_3G
128+
/* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
129+
add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd
130+
#endif
131+
/* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
132+
9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register
133+
mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
134+
.endm
135+
136+
__CPUINIT
137+
138+
/*
139+
* AT
140+
* TFR EV X F IHD LR S
141+
* .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
142+
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
143+
* 11 0 110 1 0011 1100 .111 1101 < we want
144+
*/
145+
.align 2
146+
.type v7_crval, #object
147+
v7_crval:
148+
crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
149+
150+
.previous

0 commit comments

Comments
 (0)