
Commit 463dbba

linusw (Linus Walleij) authored and Russell King (Oracle) committed
ARM: 9104/2: Fix Keystone 2 kernel mapping regression
This fixes a Keystone 2 regression discovered as a side effect of
defining and passing the physical start/end sections of the kernel
to the MMU remapping code.

As Keystone 2 applies an offset to all physical addresses, including
those identified and patched by phys2virt, we fail to account for
this offset in the kernel_sec_start and kernel_sec_end variables.
Further, these offsets can extend into the 64bit range on LPAE
systems such as the Keystone 2.

Fix it like this:
- Extend kernel_sec_start and kernel_sec_end to be 64bit
- Add the offset also to kernel_sec_start and kernel_sec_end

As passing kernel_sec_start and kernel_sec_end as 64bit invariably
incurs BE8 endianness issues, I have attempted to dry-code around
these.

Tested on the Vexpress QEMU model both with and without LPAE enabled.

Fixes: 6e121df ("ARM: 9090/1: Map the lowmem and kernel separately")
Reported-by: Nishanth Menon <[email protected]>
Suggested-by: Russell King <[email protected]>
Tested-by: Grygorii Strashko <[email protected]>
Tested-by: Nishanth Menon <[email protected]>
Signed-off-by: Linus Walleij <[email protected]>
Signed-off-by: Russell King (Oracle) <[email protected]>
1 parent e73f0f0 · commit 463dbba
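For readers unfamiliar with the BE8 problem the message alludes to, here
is a minimal userspace C sketch (illustrative only, not kernel code) of
why a 32-bit store into a 64-bit variable must land at a different byte
offset on big-endian, which is what the head.S hunk below handles with
its CONFIG_CPU_ENDIAN_BE8 conditionals:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint64_t sec_start = 0;		/* stands in for kernel_sec_start */
		uint32_t phys = 0x80000000u;	/* hypothetical 32-bit physical address */
		uint32_t words[2];

		memcpy(words, &sec_start, sizeof(words));
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		words[1] = phys;	/* low half at offset 4: str r8, [r5, #4] */
	#else
		words[0] = phys;	/* low half at offset 0: str r8, [r5] */
	#endif
		memcpy(&sec_start, words, sizeof(sec_start));
		printf("sec_start = 0x%llx\n", (unsigned long long)sec_start);
		return 0;
	}

Either way, sec_start ends up as 0x80000000; store to the wrong half and
the value lands in the upper 32 bits instead.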

4 files changed: +27 −8 lines

arch/arm/include/asm/memory.h
Lines changed: 4 additions & 3 deletions

@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion. These are
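A quick illustration of why u32 no longer suffices: with LPAE enabled,
Keystone 2 places RAM above the 4GB mark, and a u32 silently truncates
such an address. A minimal sketch; the 0x8_0000_0000 figure is an
assumption taken from the Keystone 2 memory map:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t high_phys = 0x800000000ULL;	/* 34-bit physical address */
		uint32_t truncated = (uint32_t)high_phys;

		/* prints "u64: 0x800000000  u32: 0x0" */
		printf("u64: 0x%llx  u32: 0x%x\n",
		       (unsigned long long)high_phys, truncated);
		return 0;
	}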

arch/arm/kernel/head.S
Lines changed: 14 additions & 3 deletions

@@ -49,15 +49,18 @@
 
 /*
  * This needs to be assigned at runtime when the linker symbols are
- * resolved.
+ * resolved. These are unsigned 64bit really, but in this assembly code
+ * we store them as 32bit.
  */
 	.pushsection .data
 	.align	2
 	.globl	kernel_sec_start
 	.globl	kernel_sec_end
 kernel_sec_start:
 	.long 0
+	.long 0
 kernel_sec_end:
+	.long 0
 	.long 0
 	.popsection
 
@@ -250,7 +253,11 @@ __create_page_tables:
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
 	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
-	str	r8, [r5]		@ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
+#else
+	str	r8, [r5]		@ Save physical start of kernel (LE)
+#endif
 	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	str	r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
 	bls	1b
 	eor	r3, r3, r7		@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end	@ _pa(kernel_sec_end)
-	str	r3, [r5]		@ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r3, [r5, #4]		@ Save physical end of kernel (BE)
+#else
+	str	r3, [r5]		@ Save physical end of kernel (LE)
+#endif
 
 #ifdef CONFIG_XIP_KERNEL
 	/*
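For orientation, a hedged C rendition of the loop labelled 1: above,
which covers the kernel with 2MB section mappings. Names are
illustrative and the 32-bit descriptor width is a simplification (LPAE
uses 64-bit descriptors):

	#include <stdint.h>

	#define SECTION_SIZE	0x200000u	/* 2MB, matching the comment in memory.h */

	/* Store one section descriptor (phys | flags) per 2MB of kernel;
	 * phys_end is inclusive, like the (_end - 1) loaded into r6. */
	static void map_kernel_sections(uint32_t *pmd, uint32_t phys_start,
					uint32_t phys_end, uint32_t mmu_flags)
	{
		uint32_t phys;

		for (phys = phys_start; phys <= phys_end; phys += SECTION_SIZE)
			*pmd++ = phys | mmu_flags;	/* 1: str r3, [r0], #1 << PMD_ORDER */
	}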

arch/arm/mm/mmu.c
Lines changed: 8 additions & 1 deletion

@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	if (offset == 0)
 		return;
 
+	/*
+	 * Offset the kernel section physical offsets so that the kernel
+	 * mapping will work out later on.
+	 */
+	kernel_sec_start += offset;
+	kernel_sec_end += offset;
+
 	/*
 	 * Get the address of the remap function in the 1:1 identity
 	 * mapping setup by the early page table assembly code. We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
 	prepare_page_table();
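A sketch of where offset comes from: early_paging_init() obtains it from
the machine descriptor's pv_fixup callback, which on Keystone 2 returns
the distance between the low and high physical aliases of RAM. The
concrete values here are assumptions taken from the Keystone 2 memory
map:

	#define EXAMPLE_LOW_PHYS_START	0x080000000ULL	/* 32-bit RAM alias */
	#define EXAMPLE_HIGH_PHYS_START	0x800000000ULL	/* LPAE high alias */

	/* Returns 0x780000000, the value added to kernel_sec_start and
	 * kernel_sec_end in the hunk above. */
	static unsigned long long example_pv_fixup(void)
	{
		return EXAMPLE_HIGH_PHYS_START - EXAMPLE_LOW_PHYS_START;
	}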

arch/arm/mm/pv-fixup-asm.S
Lines changed: 1 addition & 1 deletion

@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
 	ldr	r6, =(_end - 1)
 	add	r7, r2, #0x1000
 	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-	add	r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+	add	r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
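The loop this feeds (ldrd/adds/adc) adds a 64-bit physical offset,
passed in r0:r1, to every descriptor covering the kernel mapping, which
since the commit named in Fixes: begins at KERNEL_OFFSET rather than
PAGE_OFFSET. A rough C equivalent with illustrative names:

	#include <stdint.h>

	/* Add the physical offset to each 64-bit descriptor; desc_end is
	 * inclusive, mirroring the (_end - 1)-derived bound in r6. */
	static void remap_kernel_descriptors(uint64_t *desc, uint64_t *desc_end,
					     uint64_t phys_offset)
	{
		for (; desc <= desc_end; desc++)
			*desc += phys_offset;	/* adds r4, r4, r0 / adc r5, r5, r1 */
	}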
