Skip to content

Commit 66d5b53

Browse files
Fuad Tabba authored, Marc Zyngier committed
KVM: arm64: Allocate memory mapped at hyp for host sve state in pKVM
Protected mode needs to maintain (save/restore) the host's sve state, rather than relying on the host kernel to do that. This is to avoid leaking information to the host about guests and the type of operations they are performing. As a first step towards that, allocate memory mapped at hyp, per cpu, for the host sve state. The following patch will use this memory to save/restore the host state. Reviewed-by: Oliver Upton <[email protected]> Signed-off-by: Fuad Tabba <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Marc Zyngier <[email protected]>
1 parent e511e08 commit 66d5b53

File tree

7 files changed

+124
-0
lines changed

7 files changed

+124
-0
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
7676
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
7777

7878
extern unsigned int __ro_after_init kvm_sve_max_vl;
79+
extern unsigned int __ro_after_init kvm_host_sve_max_vl;
7980
int __init kvm_arm_init_sve(void);
8081

8182
u32 __attribute_const__ kvm_target_cpu(void);
@@ -521,6 +522,20 @@ struct kvm_cpu_context {
521522
u64 *vncr_array;
522523
};
523524

525+
struct cpu_sve_state {
526+
__u64 zcr_el1;
527+
528+
/*
529+
* Ordering is important since __sve_save_state/__sve_restore_state
530+
* relies on it.
531+
*/
532+
__u32 fpsr;
533+
__u32 fpcr;
534+
535+
/* Must be SVE_VQ_BYTES (128 bit) aligned. */
536+
__u8 sve_regs[];
537+
};
538+
524539
/*
525540
* This structure is instantiated on a per-CPU basis, and contains
526541
* data that is:
@@ -534,7 +549,9 @@ struct kvm_cpu_context {
534549
*/
535550
struct kvm_host_data {
536551
struct kvm_cpu_context host_ctxt;
552+
537553
struct user_fpsimd_state *fpsimd_state; /* hyp VA */
554+
struct cpu_sve_state *sve_state; /* hyp VA */
538555

539556
/* Ownership of the FP regs */
540557
enum {

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
143143

144144
extern unsigned long kvm_nvhe_sym(__icache_flags);
145145
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
146+
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
146147

147148
#endif /* __ARM64_KVM_HYP_H__ */

arch/arm64/include/asm/kvm_pkvm.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void)
128128
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
129129
}
130130

131+
static inline size_t pkvm_host_sve_state_size(void)
132+
{
133+
if (!system_supports_sve())
134+
return 0;
135+
136+
return size_add(sizeof(struct cpu_sve_state),
137+
SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
138+
}
139+
131140
#endif /* __ARM64_KVM_PKVM_H__ */

arch/arm64/kvm/arm.c

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void)
19311931
return size ? get_order(size) : 0;
19321932
}
19331933

1934+
static size_t pkvm_host_sve_state_order(void)
1935+
{
1936+
return get_order(pkvm_host_sve_state_size());
1937+
}
1938+
19341939
/* A lookup table holding the hypervisor VA for each vector slot */
19351940
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
19361941

@@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void)
23102315

23112316
static void __init teardown_hyp_mode(void)
23122317
{
2318+
bool free_sve = system_supports_sve() && is_protected_kvm_enabled();
23132319
int cpu;
23142320

23152321
free_hyp_pgds();
23162322
for_each_possible_cpu(cpu) {
23172323
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
23182324
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2325+
2326+
if (free_sve) {
2327+
struct cpu_sve_state *sve_state;
2328+
2329+
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2330+
free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
2331+
}
23192332
}
23202333
}
23212334

@@ -2398,6 +2411,50 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
23982411
return 0;
23992412
}
24002413

2414+
static int init_pkvm_host_sve_state(void)
2415+
{
2416+
int cpu;
2417+
2418+
if (!system_supports_sve())
2419+
return 0;
2420+
2421+
/* Allocate pages for host sve state in protected mode. */
2422+
for_each_possible_cpu(cpu) {
2423+
struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order());
2424+
2425+
if (!page)
2426+
return -ENOMEM;
2427+
2428+
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page);
2429+
}
2430+
2431+
/*
2432+
* Don't map the pages in hyp since these are only used in protected
2433+
* mode, which will (re)create its own mapping when initialized.
2434+
*/
2435+
2436+
return 0;
2437+
}
2438+
2439+
/*
2440+
* Finalizes the initialization of hyp mode, once everything else is initialized
2441+
* and the initialziation process cannot fail.
2442+
*/
2443+
static void finalize_init_hyp_mode(void)
2444+
{
2445+
int cpu;
2446+
2447+
if (!is_protected_kvm_enabled() || !system_supports_sve())
2448+
return;
2449+
2450+
for_each_possible_cpu(cpu) {
2451+
struct cpu_sve_state *sve_state;
2452+
2453+
sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
2454+
per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state);
2455+
}
2456+
}
2457+
24012458
static void pkvm_hyp_init_ptrauth(void)
24022459
{
24032460
struct kvm_cpu_context *hyp_ctxt;
@@ -2566,6 +2623,10 @@ static int __init init_hyp_mode(void)
25662623
goto out_err;
25672624
}
25682625

2626+
err = init_pkvm_host_sve_state();
2627+
if (err)
2628+
goto out_err;
2629+
25692630
err = kvm_hyp_init_protection(hyp_va_bits);
25702631
if (err) {
25712632
kvm_err("Failed to init hyp memory protection\n");
@@ -2730,6 +2791,13 @@ static __init int kvm_arm_init(void)
27302791
if (err)
27312792
goto out_subs;
27322793

2794+
/*
2795+
* This should be called after initialization is done and failure isn't
2796+
* possible anymore.
2797+
*/
2798+
if (!in_hyp_mode)
2799+
finalize_init_hyp_mode();
2800+
27332801
kvm_arm_initialised = true;
27342802

27352803
return 0;

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@ unsigned long __icache_flags;
1818
/* Used by kvm_get_vttbr(). */
1919
unsigned int kvm_arm_vmid_bits;
2020

21+
unsigned int kvm_host_sve_max_vl;
22+
2123
/*
2224
* Set trap register values based on features in ID_AA64PFR0.
2325
*/

arch/arm64/kvm/hyp/nvhe/setup.c

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size)
6767
return 0;
6868
}
6969

70+
static int pkvm_create_host_sve_mappings(void)
71+
{
72+
void *start, *end;
73+
int ret, i;
74+
75+
if (!system_supports_sve())
76+
return 0;
77+
78+
for (i = 0; i < hyp_nr_cpus; i++) {
79+
struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
80+
struct cpu_sve_state *sve_state = host_data->sve_state;
81+
82+
start = kern_hyp_va(sve_state);
83+
end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
84+
ret = pkvm_create_mappings(start, end, PAGE_HYP);
85+
if (ret)
86+
return ret;
87+
}
88+
89+
return 0;
90+
}
91+
7092
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
7193
unsigned long *per_cpu_base,
7294
u32 hyp_va_bits)
@@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
125147
return ret;
126148
}
127149

150+
pkvm_create_host_sve_mappings();
151+
128152
/*
129153
* Map the host sections RO in the hypervisor, but transfer the
130154
* ownership from the host to the hypervisor itself to make sure they

arch/arm64/kvm/reset.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232

3333
/* Maximum phys_shift supported for any VM on this host */
3434
static u32 __ro_after_init kvm_ipa_limit;
35+
unsigned int __ro_after_init kvm_host_sve_max_vl;
3536

3637
/*
3738
* ARMv8 Reset Values
@@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void)
5152
{
5253
if (system_supports_sve()) {
5354
kvm_sve_max_vl = sve_max_virtualisable_vl();
55+
kvm_host_sve_max_vl = sve_max_vl();
56+
kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
5457

5558
/*
5659
* The get_sve_reg()/set_sve_reg() ioctl interface will need

0 commit comments

Comments
 (0)