Commit 1f7c9d5

Merge tag 'kvm-riscv-6.16-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.16

- Add vector registers to get-reg-list selftest
- VCPU reset related improvements
- Remove scounteren initialization from VCPU reset
- Support VCPU reset from userspace using set_mpstate() ioctl

2 parents: 4d526b0 + 7917be1

22 files changed: +374 −152 lines

Documentation/virt/kvm/api.rst

Lines changed: 11 additions & 0 deletions

@@ -8541,6 +8541,17 @@ aforementioned registers before the first KVM_RUN. These registers are VM
 scoped, meaning that the same set of values are presented on all vCPUs in a
 given VM.
 
+7.43 KVM_CAP_RISCV_MP_STATE_RESET
+---------------------------------
+
+:Architectures: riscv
+:Type: VM
+:Parameters: None
+:Returns: 0 on success, -EINVAL if arg[0] is not zero
+
+When this capability is enabled, KVM resets the VCPU when setting
+MP_STATE_INIT_RECEIVED through IOCTL. The original MP_STATE is preserved.
+
 8. Other capabilities.
 ======================
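
For illustration, a minimal userspace sketch of the new flow (hypothetical helper; vm_fd/vcpu_fd are assumed to be open KVM descriptors, error handling elided): enable the VM-scoped capability once, then reset a vCPU in place via KVM_SET_MP_STATE. Without the capability the same ioctl fails with -EINVAL, as the arch/riscv/kvm/vcpu.c hunk further down shows.

        #include <linux/kvm.h>
        #include <sys/ioctl.h>

        /* Sketch: vm_fd and vcpu_fd are assumed to be open KVM descriptors. */
        static int reset_vcpu_in_place(int vm_fd, int vcpu_fd)
        {
                struct kvm_enable_cap cap = {
                        .cap = KVM_CAP_RISCV_MP_STATE_RESET, /* no arguments */
                };
                struct kvm_mp_state mp = {
                        .mp_state = KVM_MP_STATE_INIT_RECEIVED,
                };

                if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
                        return -1;
                /* Resets the vCPU; its original MP_STATE is preserved. */
                return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
        }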

arch/riscv/include/asm/kvm_aia.h

Lines changed: 0 additions & 3 deletions

@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
 	/* CPU AIA CSR context of Guest VCPU */
 	struct kvm_vcpu_aia_csr guest_csr;
 
-	/* CPU AIA CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_aia_csr guest_reset_csr;
-
 	/* Guest physical address of IMSIC for this VCPU */
 	gpa_t imsic_addr;

arch/riscv/include/asm/kvm_host.h

Lines changed: 11 additions & 6 deletions

@@ -119,6 +119,9 @@ struct kvm_arch {
 
 	/* AIA Guest/VM context */
 	struct kvm_aia aia;
+
+	/* KVM_CAP_RISCV_MP_STATE_RESET */
+	bool mp_state_reset;
 };
 
 struct kvm_cpu_trap {

@@ -193,6 +196,12 @@ struct kvm_vcpu_smstateen_csr {
 	unsigned long sstateen0;
 };
 
+struct kvm_vcpu_reset_state {
+	spinlock_t lock;
+	unsigned long pc;
+	unsigned long a1;
+};
+
 struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;

@@ -227,12 +236,8 @@ struct kvm_vcpu_arch {
 	/* CPU Smstateen CSR context of Guest VCPU */
 	struct kvm_vcpu_smstateen_csr smstateen_csr;
 
-	/* CPU context upon Guest VCPU reset */
-	struct kvm_cpu_context guest_reset_context;
-	spinlock_t reset_cntx_lock;
-
-	/* CPU CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_csr guest_reset_csr;
+	/* CPU reset state of Guest VCPU */
+	struct kvm_vcpu_reset_state reset_state;
 
 	/*
 	 * VCPU interrupts

arch/riscv/include/asm/kvm_vcpu_sbi.h

Lines changed: 3 additions & 0 deletions

@@ -55,6 +55,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 				     struct kvm_run *run,
 				     u32 type, u64 flags);
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1);
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
 				   const struct kvm_one_reg *reg);

arch/riscv/include/asm/kvm_vcpu_vector.h

Lines changed: 2 additions & 4 deletions

@@ -33,8 +33,7 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
 					 unsigned long *isa);
 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
 #else

@@ -62,8 +61,7 @@ static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cn
 {
 }
 
-static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-						      struct kvm_cpu_context *cntx)
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }

arch/riscv/kernel/head.S

Lines changed: 10 additions & 0 deletions

@@ -131,6 +131,12 @@ secondary_start_sbi:
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero
 
+#ifndef CONFIG_RISCV_M_MODE
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
+#endif
+
 	/* Load the global pointer */
 	load_global_pointer

@@ -226,6 +232,10 @@ SYM_CODE_START(_start_kernel)
 	 * to hand it to us.
 	 */
 	csrr a0, CSR_MHARTID
+#else
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
 #endif /* CONFIG_RISCV_M_MODE */
 
 	/* Load the global pointer */
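
Background on the magic numbers, from the RISC-V privileged spec rather than this diff: scounteren bit 0 is CY (cycle), bit 1 is TM (time), bit 2 is IR (instret). The KVM reset default 0x7 removed in vcpu.c below exposed all three counters, while the 0x2 written here exposes only the time CSR. A compile-checkable restatement (macro names are illustrative, not kernel defines):

        /* scounteren bit positions per the RISC-V privileged spec. */
        #define SCOUNTEREN_CY (1U << 0) /* cycle   */
        #define SCOUNTEREN_TM (1U << 1) /* time    */
        #define SCOUNTEREN_IR (1U << 2) /* instret */

        /* li t0, 0x2 above enables exactly the time CSR ... */
        _Static_assert(SCOUNTEREN_TM == 0x2, "time CSR only");
        /* ... while the dropped KVM default 0x7 enabled CY, TM and IR. */
        _Static_assert((SCOUNTEREN_CY | SCOUNTEREN_TM | SCOUNTEREN_IR) == 0x7,
                       "cycle + time + instret");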

arch/riscv/kvm/Kconfig

Lines changed: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
+	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on RISCV_SBI && MMU
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING

arch/riscv/kvm/aia_device.c

Lines changed: 1 addition & 3 deletions

@@ -526,12 +526,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
-	struct kvm_vcpu_aia_csr *reset_csr =
-				&vcpu->arch.aia_context.guest_reset_csr;
 
 	if (!kvm_riscv_aia_available())
 		return;
-	memcpy(csr, reset_csr, sizeof(*csr));
+	memset(csr, 0, sizeof(*csr));
 
 	/* Proceed only if AIA was initialized successfully */
 	if (!kvm_riscv_aia_initialized(vcpu->kvm))

arch/riscv/kvm/vcpu.c

Lines changed: 35 additions & 29 deletions

@@ -51,12 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+					 bool kvm_sbi_reset)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+	void *vector_datap = cntx->vector.datap;
+
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
+
+	if (kvm_sbi_reset)
+		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
+
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
+
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
+{
 	bool loaded;
 
 	/**

@@ -71,13 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);

@@ -112,8 +127,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);

@@ -133,24 +146,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	spin_lock_init(&vcpu->arch.reset_cntx_lock);
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
 		return -ENOMEM;
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
-
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);

@@ -169,7 +169,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_sbi_init(vcpu);
 
 	/* Reset VCPU */
-	kvm_riscv_reset_vcpu(vcpu);
+	kvm_riscv_reset_vcpu(vcpu, false);
 
 	return 0;
 }

@@ -518,6 +518,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	case KVM_MP_STATE_STOPPED:
 		__kvm_riscv_vcpu_power_off(vcpu);
 		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		if (vcpu->kvm->arch.mp_state_reset)
+			kvm_riscv_reset_vcpu(vcpu, false);
+		else
+			ret = -EINVAL;
+		break;
 	default:
 		ret = -EINVAL;
 	}

@@ -706,7 +712,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
-		kvm_riscv_reset_vcpu(vcpu);
+		kvm_riscv_reset_vcpu(vcpu, true);
 
 	if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
 		kvm_riscv_gstage_update_hgatp(vcpu);

arch/riscv/kvm/vcpu_sbi.c

Lines changed: 30 additions & 2 deletions

@@ -143,9 +143,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	struct kvm_vcpu *tmp;
 
 	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-		spin_lock(&vcpu->arch.mp_state_lock);
+		spin_lock(&tmp->arch.mp_state_lock);
 		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
-		spin_unlock(&vcpu->arch.mp_state_lock);
+		spin_unlock(&tmp->arch.mp_state_lock);
 	}
 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

@@ -156,6 +156,34 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
 
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1)
+{
+	spin_lock(&vcpu->arch.reset_state.lock);
+	vcpu->arch.reset_state.pc = pc;
+	vcpu->arch.reset_state.a1 = a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+}
+
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+	cntx->a0 = vcpu->vcpu_id;
+
+	spin_lock(&vcpu->arch.reset_state.lock);
+	cntx->sepc = reset_state->pc;
+	cntx->a1 = reset_state->a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	cntx->sstatus &= ~SR_SIE;
+	csr->vsatp = 0;
+}
+
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
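
The pair of helpers above splits reset into request and apply: an SBI caller records (pc, a1) under reset_state.lock and raises KVM_REQ_VCPU_RESET; the target vCPU applies it on its next pass through kvm_riscv_check_vcpu_requests(). A self-contained userspace model of that handoff, with pthreads and an atomic flag standing in for the KVM request machinery (all names hypothetical):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdbool.h>

        /* Hypothetical stand-in for struct kvm_vcpu_reset_state. */
        struct reset_state {
                pthread_mutex_t lock;
                unsigned long pc, a1;
        };

        static struct reset_state rs = { .lock = PTHREAD_MUTEX_INITIALIZER };
        static atomic_bool reset_requested;

        /* Producer side: mirrors kvm_riscv_vcpu_sbi_request_reset(). */
        void request_reset(unsigned long pc, unsigned long a1)
        {
                pthread_mutex_lock(&rs.lock);
                rs.pc = pc;
                rs.a1 = a1;
                pthread_mutex_unlock(&rs.lock);
                atomic_store(&reset_requested, true); /* like kvm_make_request() */
        }

        /* Consumer side (vCPU loop): mirrors the KVM_REQ_VCPU_RESET check. */
        void check_requests(unsigned long *sepc, unsigned long *a1)
        {
                if (atomic_exchange(&reset_requested, false)) {
                        pthread_mutex_lock(&rs.lock);
                        *sepc = rs.pc; /* as in kvm_riscv_vcpu_sbi_load_reset_state() */
                        *a1 = rs.a1;
                        pthread_mutex_unlock(&rs.lock);
                }
        }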

arch/riscv/kvm/vcpu_sbi_hsm.c

Lines changed: 1 addition & 12 deletions

@@ -15,7 +15,6 @@
 
 static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *reset_cntx;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	struct kvm_vcpu *target_vcpu;
 	unsigned long target_vcpuid = cp->a0;

@@ -32,17 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	spin_lock(&target_vcpu->arch.reset_cntx_lock);
-	reset_cntx = &target_vcpu->arch.guest_reset_context;
-	/* start address */
-	reset_cntx->sepc = cp->a1;
-	/* target vcpu id to start */
-	reset_cntx->a0 = target_vcpuid;
-	/* private data passed from kernel */
-	reset_cntx->a1 = cp->a2;
-	spin_unlock(&target_vcpu->arch.reset_cntx_lock);
-
-	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
 
 	__kvm_riscv_vcpu_power_on(target_vcpu);

arch/riscv/kvm/vcpu_sbi_system.c

Lines changed: 1 addition & 9 deletions

@@ -13,7 +13,6 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 				    struct kvm_vcpu_sbi_return *retdata)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx;
 	unsigned long funcid = cp->a6;
 	unsigned long hva, i;
 	struct kvm_vcpu *tmp;

@@ -45,14 +44,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		}
 	}
 
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	reset_cntx = &vcpu->arch.guest_reset_context;
-	reset_cntx->sepc = cp->a1;
-	reset_cntx->a0 = vcpu->vcpu_id;
-	reset_cntx->a1 = cp->a2;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
 
 	/* userspace provides the suspend implementation */
 	kvm_riscv_vcpu_sbi_forward(vcpu, run);

arch/riscv/kvm/vcpu_vector.c

Lines changed: 7 additions & 6 deletions

@@ -22,6 +22,9 @@ void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 
 	cntx->sstatus &= ~SR_VS;
+
+	cntx->vector.vlenb = riscv_v_vsize / 32;
+
 	if (riscv_isa_extension_available(isa, v)) {
 		cntx->sstatus |= SR_VS_INITIAL;
 		WARN_ON(!cntx->vector.datap);

@@ -70,13 +73,11 @@ void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 	__kvm_riscv_vector_restore(cntx);
 }
 
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx)
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
-	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
-	if (!cntx->vector.datap)
+	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!vcpu->arch.guest_context.vector.datap)
 		return -ENOMEM;
-	cntx->vector.vlenb = riscv_v_vsize / 32;
 
 	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
 	if (!vcpu->arch.host_context.vector.datap)

@@ -87,7 +88,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 {
-	kfree(vcpu->arch.guest_reset_context.vector.datap);
+	kfree(vcpu->arch.guest_context.vector.datap);
 	kfree(vcpu->arch.host_context.vector.datap);
 }
 #endif
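
A note on the moved vlenb assignment: it must be re-derived at vector reset because the new memset-based context reset wipes the guest context (only datap is restored explicitly). Assuming, per the kernel's vector setup, that riscv_v_vsize is the combined size of the 32 architectural vector registers, the arithmetic is (hypothetical helper name):

        /* Assuming riscv_v_vsize == 32 * (VLEN / 8), each of the 32
         * architectural vector registers gets riscv_v_vsize / 32 bytes. */
        unsigned long vlenb_bytes(unsigned long riscv_v_vsize)
        {
                return riscv_v_vsize / 32; /* e.g. vsize 4096 -> vlenb 128 */
        }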
