Skip to content

Commit 685cc6a

Browse files
lian-bojfvogel
authored and committed
kdump/vmcore: support encrypted old memory with SME enabled
Orabug: 28796835

kdump/vmcore: support encrypted old memory with SME enabled

In the kdump kernel, we need to dump the old memory into the vmcore file. If SME is enabled in the first kernel, we have to remap the old memory with the memory encryption mask, which will be automatically decrypted when we read from DRAM.

For SME kdump, there are two cases that are not supported:

 ----------------------------------------------
| first-kernel | second-kernel | kdump support |
|  (mem_encrypt=on|off)        |   (yes|no)    |
|--------------+---------------+---------------|
|     on       |      on       |      yes      |
|     off      |      off      |      yes      |
|     on       |      off      |      no       |
|     off      |      on       |      no       |
|______________|_______________|_______________|

1. SME is enabled in the first kernel, but SME is disabled in the kdump kernel.
   In this case, because the old memory is encrypted, we can't decrypt the old memory.

2. SME is disabled in the first kernel, but SME is enabled in the kdump kernel.
   On the one hand, the old memory is unencrypted and can be dumped as usual, so we don't need to enable SME in the kdump kernel. On the other hand, it would increase the complexity of the code: we would have to pass the SME flag from the first kernel to the kdump kernel, which is really too expensive to do.

These patches are only for SME kdump; they do not support SEV kdump.

Signed-off-by: Lianbo Jiang <[email protected]> Signed-off-by: Dan Duval <[email protected]> Reviewed-by: Henry Willard <[email protected]>
1 parent 4f46ea1 commit 685cc6a

File tree

4 files changed

+81
-6
lines changed

4 files changed

+81
-6
lines changed

arch/x86/kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,7 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
9393
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
9494
obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
9595
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
96+
obj-$(CONFIG_AMD_MEM_ENCRYPT) += crash_dump_encrypt.o
9697
obj-y += kprobes/
9798
obj-$(CONFIG_MODULES) += module.o
9899
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o

arch/x86/kernel/crash_dump_encrypt.c

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Memory preserving reboot related code.
4+
*
5+
* Created by: Lianbo Jiang ([email protected])
6+
* Copyright (C) RedHat Corporation, 2018. All rights reserved
7+
*/
8+
9+
#include <linux/errno.h>
10+
#include <linux/crash_dump.h>
11+
#include <linux/uaccess.h>
12+
#include <linux/io.h>
13+
14+
/**
15+
* copy_oldmem_page_encrypted - copy one page from "oldmem encrypted"
16+
* @pfn: page frame number to be copied
17+
* @buf: target memory address for the copy; this can be in kernel address
18+
* space or user address space (see @userbuf)
19+
* @csize: number of bytes to copy
20+
* @offset: offset in bytes into the page (based on pfn) to begin the copy
21+
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
22+
* otherwise @buf is in kernel address space, use memcpy().
23+
*
24+
* Copy a page from "oldmem encrypted". For this page, there is no pte
25+
* mapped in the current kernel. We stitch up a pte, similar to
26+
* kmap_atomic.
27+
*/
28+
29+
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
30+
size_t csize, unsigned long offset, int userbuf)
31+
{
32+
void *vaddr;
33+
34+
if (!csize)
35+
return 0;
36+
37+
vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT,
38+
PAGE_SIZE);
39+
if (!vaddr)
40+
return -ENOMEM;
41+
42+
if (userbuf) {
43+
if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
44+
iounmap((void __iomem *)vaddr);
45+
return -EFAULT;
46+
}
47+
} else
48+
memcpy(buf, vaddr + offset, csize);
49+
50+
set_iounmap_nonlazy();
51+
iounmap((void __iomem *)vaddr);
52+
return csize;
53+
}

fs/proc/vmcore.c

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,9 @@
2424
#include <linux/pagemap.h>
2525
#include <linux/uaccess.h>
2626
#include <asm/io.h>
27+
#include <linux/io.h>
28+
#include <linux/mem_encrypt.h>
29+
#include <asm/pgtable.h>
2730
#include "internal.h"
2831

2932
/* List representing chunks of contiguous memory areas and their offsets in
@@ -86,7 +89,8 @@ static int pfn_is_ram(unsigned long pfn)
8689

8790
/* Reads a page from the oldmem device from given offset. */
8891
static ssize_t read_from_oldmem(char *buf, size_t count,
89-
u64 *ppos, int userbuf)
92+
u64 *ppos, int userbuf,
93+
bool encrypted)
9094
{
9195
unsigned long pfn, offset;
9296
size_t nr_bytes;
@@ -108,8 +112,11 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
108112
if (pfn_is_ram(pfn) == 0)
109113
memset(buf, 0, nr_bytes);
110114
else {
111-
tmp = copy_oldmem_page(pfn, buf, nr_bytes,
112-
offset, userbuf);
115+
tmp = encrypted ? copy_oldmem_page_encrypted(pfn,
116+
buf, nr_bytes, offset, userbuf)
117+
: copy_oldmem_page(pfn, buf, nr_bytes,
118+
offset, userbuf);
119+
113120
if (tmp < 0)
114121
return tmp;
115122
}
@@ -143,15 +150,15 @@ void __weak elfcorehdr_free(unsigned long long addr)
143150
*/
144151
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
145152
{
146-
return read_from_oldmem(buf, count, ppos, 0);
153+
return read_from_oldmem(buf, count, ppos, 0, false);
147154
}
148155

149156
/*
150157
* Architectures may override this function to read from notes sections
151158
*/
152159
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
153160
{
154-
return read_from_oldmem(buf, count, ppos, 0);
161+
return read_from_oldmem(buf, count, ppos, 0, sme_active());
155162
}
156163

157164
/*
@@ -161,6 +168,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
161168
unsigned long from, unsigned long pfn,
162169
unsigned long size, pgprot_t prot)
163170
{
171+
prot = pgprot_encrypted(prot);
164172
return remap_pfn_range(vma, from, pfn, size, prot);
165173
}
166174

@@ -235,7 +243,8 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
235243
m->offset + m->size - *fpos,
236244
buflen);
237245
start = m->paddr + *fpos - m->offset;
238-
tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
246+
tmp = read_from_oldmem(buffer, tsz, &start, userbuf,
247+
sme_active());
239248
if (tmp < 0)
240249
return tmp;
241250
buflen -= tsz;

include/linux/crash_dump.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,18 @@ extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
2525

2626
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
2727
unsigned long, int);
28+
#ifdef CONFIG_AMD_MEM_ENCRYPT
29+
extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
30+
size_t csize, unsigned long offset,
31+
int userbuf);
32+
#else
33+
static inline
34+
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
35+
unsigned long offset, int userbuf)
36+
{
37+
return 0;
38+
}
39+
#endif
2840
void vmcore_cleanup(void);
2941

3042
/* Architecture code defines this if there are other possible ELF

0 commit comments

Comments
 (0)