Skip to content

Commit b7b27fa

Browse files
gengdongjiu1 authored and Marc Zyngier committed
arm/arm64: KVM: Add KVM_GET/SET_VCPU_EVENTS
For the migrating VMs, user space may need to know the exception state. For example, in the machine A, KVM make an SError pending, when migrate to B, KVM also needs to pend an SError. This new IOCTL exports user-invisible states related to SError. Together with appropriate user space changes, user space can get/set the SError exception state to do migrate/snapshot/suspend. Signed-off-by: Dongjiu Geng <[email protected]> Reviewed-by: James Morse <[email protected]> [expanded documentation wording] Signed-off-by: James Morse <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent 327432c commit b7b27fa

File tree

8 files changed

+146
-7
lines changed

8 files changed

+146
-7
lines changed

Documentation/virtual/kvm/api.txt

Lines changed: 50 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -835,11 +835,13 @@ struct kvm_clock_data {
835835

836836
Capability: KVM_CAP_VCPU_EVENTS
837837
Extended by: KVM_CAP_INTR_SHADOW
838-
Architectures: x86
839-
Type: vm ioctl
838+
Architectures: x86, arm64
839+
Type: vcpu ioctl
840840
Parameters: struct kvm_vcpu_events (out)
841841
Returns: 0 on success, -1 on error
842842

843+
X86:
844+
843845
Gets currently pending exceptions, interrupts, and NMIs as well as related
844846
states of the vcpu.
845847

@@ -881,15 +883,52 @@ Only two fields are defined in the flags field:
881883
- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
882884
smi contains a valid state.
883885

886+
ARM64:
887+
888+
If the guest accesses a device that is being emulated by the host kernel in
889+
such a way that a real device would generate a physical SError, KVM may make
890+
a virtual SError pending for that VCPU. This system error interrupt remains
891+
pending until the guest takes the exception by unmasking PSTATE.A.
892+
893+
Running the VCPU may cause it to take a pending SError, or make an access that
894+
causes an SError to become pending. The event's description is only valid while
895+
the VCPU is not running.
896+
897+
This API provides a way to read and write the pending 'event' state that is not
898+
visible to the guest. To save, restore or migrate a VCPU the struct representing
899+
the state can be read then written using this GET/SET API, along with the other
900+
guest-visible registers. It is not possible to 'cancel' an SError that has been
901+
made pending.
902+
903+
A device being emulated in user-space may also wish to generate an SError. To do
904+
this the events structure can be populated by user-space. The current state
905+
should be read first, to ensure no existing SError is pending. If an existing
906+
SError is pending, the architecture's 'Multiple SError interrupts' rules should
907+
be followed. (2.5.3 of DDI0587.a "ARM Reliability, Availability, and
908+
Serviceability (RAS) Specification").
909+
910+
struct kvm_vcpu_events {
911+
struct {
912+
__u8 serror_pending;
913+
__u8 serror_has_esr;
914+
/* Align it to 8 bytes */
915+
__u8 pad[6];
916+
__u64 serror_esr;
917+
} exception;
918+
__u32 reserved[12];
919+
};
920+
884921
4.32 KVM_SET_VCPU_EVENTS
885922

886923
Capability: KVM_CAP_VCPU_EVENTS
887924
Extended by: KVM_CAP_INTR_SHADOW
888-
Architectures: x86
889-
Type: vm ioctl
925+
Architectures: x86, arm64
926+
Type: vcpu ioctl
890927
Parameters: struct kvm_vcpu_events (in)
891928
Returns: 0 on success, -1 on error
892929

930+
X86:
931+
893932
Set pending exceptions, interrupts, and NMIs as well as related states of the
894933
vcpu.
895934

@@ -910,6 +949,13 @@ shall be written into the VCPU.
910949

911950
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
912951

952+
ARM64:
953+
954+
Set the pending SError exception state for this VCPU. It is not possible to
955+
'cancel' an SError that has been made pending.
956+
957+
See KVM_GET_VCPU_EVENTS for the data structure.
958+
913959

914960
4.33 KVM_GET_DEBUGREGS
915961

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,11 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
9393
vcpu->arch.hcr_el2 |= HCR_TWE;
9494
}
9595

96+
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
97+
{
98+
return vcpu->arch.vsesr_el2;
99+
}
100+
96101
static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
97102
{
98103
vcpu->arch.vsesr_el2 = vsesr;

arch/arm64/include/asm/kvm_host.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -350,6 +350,11 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
350350
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
351351
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
352352
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
353+
int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
354+
struct kvm_vcpu_events *events);
355+
356+
int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
357+
struct kvm_vcpu_events *events);
353358

354359
#define KVM_ARCH_WANT_MMU_NOTIFIER
355360
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
@@ -378,6 +383,8 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
378383
int kvm_perf_init(void);
379384
int kvm_perf_teardown(void);
380385

386+
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
387+
381388
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
382389

383390
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);

arch/arm64/include/uapi/asm/kvm.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
#define __KVM_HAVE_GUEST_DEBUG
4040
#define __KVM_HAVE_IRQ_LINE
4141
#define __KVM_HAVE_READONLY_MEM
42+
#define __KVM_HAVE_VCPU_EVENTS
4243

4344
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
4445

@@ -154,6 +155,18 @@ struct kvm_sync_regs {
154155
struct kvm_arch_memory_slot {
155156
};
156157

158+
/* for KVM_GET/SET_VCPU_EVENTS */
159+
struct kvm_vcpu_events {
160+
struct {
161+
__u8 serror_pending;
162+
__u8 serror_has_esr;
163+
/* Align it to 8 bytes */
164+
__u8 pad[6];
165+
__u64 serror_esr;
166+
} exception;
167+
__u32 reserved[12];
168+
};
169+
157170
/* If you need to interpret the index values, here is the key: */
158171
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
159172
#define KVM_REG_ARM_COPROC_SHIFT 16

arch/arm64/kvm/guest.c

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -289,6 +289,52 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
289289
return -EINVAL;
290290
}
291291

292+
int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
293+
struct kvm_vcpu_events *events)
294+
{
295+
memset(events, 0, sizeof(*events));
296+
297+
events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
298+
events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
299+
300+
if (events->exception.serror_pending && events->exception.serror_has_esr)
301+
events->exception.serror_esr = vcpu_get_vsesr(vcpu);
302+
303+
return 0;
304+
}
305+
306+
int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
307+
struct kvm_vcpu_events *events)
308+
{
309+
int i;
310+
bool serror_pending = events->exception.serror_pending;
311+
bool has_esr = events->exception.serror_has_esr;
312+
313+
/* check whether the reserved field is zero */
314+
for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
315+
if (events->reserved[i])
316+
return -EINVAL;
317+
318+
/* check whether the pad field is zero */
319+
for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
320+
if (events->exception.pad[i])
321+
return -EINVAL;
322+
323+
if (serror_pending && has_esr) {
324+
if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
325+
return -EINVAL;
326+
327+
if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
328+
kvm_set_sei_esr(vcpu, events->exception.serror_esr);
329+
else
330+
return -EINVAL;
331+
} else if (serror_pending) {
332+
kvm_inject_vabt(vcpu);
333+
}
334+
335+
return 0;
336+
}
337+
292338
int __attribute_const__ kvm_target_cpu(void)
293339
{
294340
unsigned long implementor = read_cpuid_implementor();

arch/arm64/kvm/inject_fault.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -164,9 +164,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
164164
inject_undef64(vcpu);
165165
}
166166

167-
static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
167+
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
168168
{
169-
vcpu_set_vsesr(vcpu, esr);
169+
vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
170170
*vcpu_hcr(vcpu) |= HCR_VSE;
171171
}
172172

@@ -184,5 +184,5 @@ static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
184184
*/
185185
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
186186
{
187-
pend_guest_serror(vcpu, ESR_ELx_ISV);
187+
kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
188188
}

arch/arm64/kvm/reset.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
7979
break;
8080
case KVM_CAP_SET_GUEST_DEBUG:
8181
case KVM_CAP_VCPU_ATTRIBUTES:
82+
case KVM_CAP_VCPU_EVENTS:
8283
r = 1;
8384
break;
8485
default:

virt/kvm/arm/arm.c

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1130,6 +1130,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
11301130
r = kvm_arm_vcpu_has_attr(vcpu, &attr);
11311131
break;
11321132
}
1133+
#ifdef __KVM_HAVE_VCPU_EVENTS
1134+
case KVM_GET_VCPU_EVENTS: {
1135+
struct kvm_vcpu_events events;
1136+
1137+
if (kvm_arm_vcpu_get_events(vcpu, &events))
1138+
return -EINVAL;
1139+
1140+
if (copy_to_user(argp, &events, sizeof(events)))
1141+
return -EFAULT;
1142+
1143+
return 0;
1144+
}
1145+
case KVM_SET_VCPU_EVENTS: {
1146+
struct kvm_vcpu_events events;
1147+
1148+
if (copy_from_user(&events, argp, sizeof(events)))
1149+
return -EFAULT;
1150+
1151+
return kvm_arm_vcpu_set_events(vcpu, &events);
1152+
}
1153+
#endif
11331154
default:
11341155
r = -EINVAL;
11351156
}

0 commit comments

Comments
 (0)