Skip to content

Commit 94d0e59

Browse files
author
Marc Zyngier
committed
arm/arm64: KVM: Perform local TLB invalidation when multiplexing vcpus on a single CPU
Architecturally, TLBs are private to the (physical) CPU they're
associated with. But when multiple vcpus from the same VM are
being multiplexed on the same CPU, the TLBs are not private to
the vcpus (and are actually shared across the VMID).

Let's consider the following scenario:

- vcpu-0 maps PA to VA
- vcpu-1 maps PA' to VA

If run on the same physical CPU, vcpu-1 can hit TLB entries generated
by vcpu-0 accesses, and access the wrong physical page.

The solution to this is to keep a per-VM map of which vcpu ran last
on each given physical CPU, and invalidate local TLBs when switching
to a different vcpu from the same VM.

Reviewed-by: Christoffer Dall <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 07d9a38 commit 94d0e59

File tree

9 files changed

+66
-2
lines changed

9 files changed

+66
-2
lines changed

arch/arm/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
6666
extern void __kvm_flush_vm_context(void);
6767
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
6868
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
69+
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
6970

7071
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
7172

arch/arm/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,9 @@ struct kvm_arch {
5757
/* VTTBR value associated with below pgd and vmid */
5858
u64 vttbr;
5959

60+
/* The last vcpu id that ran on each physical CPU */
61+
int __percpu *last_vcpu_ran;
62+
6063
/* Timer */
6164
struct arch_timer_kvm timer;
6265

arch/arm/include/asm/kvm_hyp.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@
7171
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
7272
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
7373
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
74+
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
7475
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
7576
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
7677
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)

arch/arm/kvm/arm.c

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
114114
*/
115115
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
116116
{
117-
int ret = 0;
117+
int ret, cpu;
118118

119119
if (type)
120120
return -EINVAL;
121121

122+
kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
123+
if (!kvm->arch.last_vcpu_ran)
124+
return -ENOMEM;
125+
126+
for_each_possible_cpu(cpu)
127+
*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
128+
122129
ret = kvm_alloc_stage2_pgd(kvm);
123130
if (ret)
124131
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
141148
out_free_stage2_pgd:
142149
kvm_free_stage2_pgd(kvm);
143150
out_fail_alloc:
151+
free_percpu(kvm->arch.last_vcpu_ran);
152+
kvm->arch.last_vcpu_ran = NULL;
144153
return ret;
145154
}
146155

@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
168177
{
169178
int i;
170179

180+
free_percpu(kvm->arch.last_vcpu_ran);
181+
kvm->arch.last_vcpu_ran = NULL;
182+
171183
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
172184
if (kvm->vcpus[i]) {
173185
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
312324

313325
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
314326
{
327+
int *last_ran;
328+
329+
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
330+
331+
/*
332+
* We might get preempted before the vCPU actually runs, but
333+
* over-invalidation doesn't affect correctness.
334+
*/
335+
if (*last_ran != vcpu->vcpu_id) {
336+
kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
337+
*last_ran = vcpu->vcpu_id;
338+
}
339+
315340
vcpu->cpu = cpu;
316341
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
317342

arch/arm/kvm/hyp/tlb.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,21 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
5555
__kvm_tlb_flush_vmid(kvm);
5656
}
5757

58+
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
59+
{
60+
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
61+
62+
/* Switch to requested VMID */
63+
write_sysreg(kvm->arch.vttbr, VTTBR);
64+
isb();
65+
66+
write_sysreg(0, TLBIALL);
67+
dsb(nsh);
68+
isb();
69+
70+
write_sysreg(0, VTTBR);
71+
}
72+
5873
void __hyp_text __kvm_flush_vm_context(void)
5974
{
6075
write_sysreg(0, TLBIALLNSNHIS);

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ extern char __kvm_hyp_vector[];
5454
extern void __kvm_flush_vm_context(void);
5555
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
5656
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
57+
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
5758

5859
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
5960

arch/arm64/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,9 @@ struct kvm_arch {
6262
/* VTTBR value associated with above pgd and vmid */
6363
u64 vttbr;
6464

65+
/* The last vcpu id that ran on each physical CPU */
66+
int __percpu *last_vcpu_ran;
67+
6568
/* The maximum number of vCPUs depends on the used GIC model */
6669
int max_vcpus;
6770

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
128128
return v;
129129
}
130130

131-
#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
131+
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
132132

133133
/*
134134
* We currently only support a 40bit IPA.

arch/arm64/kvm/hyp/tlb.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,21 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
6464
write_sysreg(0, vttbr_el2);
6565
}
6666

67+
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
68+
{
69+
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
70+
71+
/* Switch to requested VMID */
72+
write_sysreg(kvm->arch.vttbr, vttbr_el2);
73+
isb();
74+
75+
asm volatile("tlbi vmalle1" : : );
76+
dsb(nsh);
77+
isb();
78+
79+
write_sysreg(0, vttbr_el2);
80+
}
81+
6782
void __hyp_text __kvm_flush_vm_context(void)
6883
{
6984
dsb(ishst);

0 commit comments

Comments (0)