@@ -3,11 +3,14 @@
  * Copyright (c) 2022 Ventana Micro Systems Inc.
  */

-#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/smp.h>
 #include <linux/kvm_host.h>
+#include <asm/cacheflush.h>
 #include <asm/csr.h>

 /*
@@ -211,3 +214,225 @@ void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)

 	csr_write(CSR_HGATP, hgatp);
 }
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
+{
+	local_flush_icache_all();
+}
+
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vmid *vmid;
+
+	vmid = &vcpu->kvm->arch.vmid;
+	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+}
+
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vmid *vmid;
+
+	vmid = &vcpu->kvm->arch.vmid;
+	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+}
+
+static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
+				struct kvm_riscv_hfence *out_data)
+{
+	bool ret = false;
+	struct kvm_vcpu_arch *varch = &vcpu->arch;
+
+	spin_lock(&varch->hfence_lock);
+
+	if (varch->hfence_queue[varch->hfence_head].type) {
+		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
+		       sizeof(*out_data));
+		varch->hfence_queue[varch->hfence_head].type = 0;
+
+		varch->hfence_head++;
+		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
+			varch->hfence_head = 0;
+
+		ret = true;
+	}
+
+	spin_unlock(&varch->hfence_lock);
+
+	return ret;
+}
+
+static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
+				const struct kvm_riscv_hfence *data)
+{
+	bool ret = false;
+	struct kvm_vcpu_arch *varch = &vcpu->arch;
+
+	spin_lock(&varch->hfence_lock);
+
+	if (!varch->hfence_queue[varch->hfence_tail].type) {
+		memcpy(&varch->hfence_queue[varch->hfence_tail],
+		       data, sizeof(*data));
+
+		varch->hfence_tail++;
+		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
+			varch->hfence_tail = 0;
+
+		ret = true;
+	}
+
+	spin_unlock(&varch->hfence_lock);
+
+	return ret;
+}
+
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
+{
+	struct kvm_riscv_hfence d = { 0 };
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+
+	while (vcpu_hfence_dequeue(vcpu, &d)) {
+		switch (d.type) {
+		case KVM_RISCV_HFENCE_UNKNOWN:
+			break;
+		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
+			kvm_riscv_local_hfence_gvma_vmid_gpa(
+						READ_ONCE(v->vmid),
+						d.addr, d.size, d.order);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
+			kvm_riscv_local_hfence_vvma_asid_gva(
+						READ_ONCE(v->vmid), d.asid,
+						d.addr, d.size, d.order);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
+			kvm_riscv_local_hfence_vvma_asid_all(
+						READ_ONCE(v->vmid), d.asid);
+			break;
+		case KVM_RISCV_HFENCE_VVMA_GVA:
+			kvm_riscv_local_hfence_vvma_gva(
+						READ_ONCE(v->vmid),
+						d.addr, d.size, d.order);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void make_xfence_request(struct kvm *kvm,
+				unsigned long hbase, unsigned long hmask,
+				unsigned int req, unsigned int fallback_req,
+				const struct kvm_riscv_hfence *data)
+{
+	unsigned long i;
+	struct kvm_vcpu *vcpu;
+	unsigned int actual_req = req;
+	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+
+	bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (hbase != -1UL) {
+			if (vcpu->vcpu_id < hbase)
+				continue;
+			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
+				continue;
+		}
+
+		bitmap_set(vcpu_mask, i, 1);
+
+		if (!data || !data->type)
+			continue;
+
+		/*
+		 * Enqueue hfence data to VCPU hfence queue. If we don't
+		 * have space in the VCPU hfence queue then fallback to
+		 * a more conservative hfence request.
+		 */
+		if (!vcpu_hfence_enqueue(vcpu, data))
+			actual_req = fallback_req;
+	}
+
+	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
+}
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+		       unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
+			    KVM_REQ_FENCE_I, NULL);
+}
+
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    gpa_t gpa, gpa_t gpsz,
+				    unsigned long order)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
+	data.asid = 0;
+	data.addr = gpa;
+	data.size = gpsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
+}
+
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
+			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
+}
+
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long gva, unsigned long gvsz,
+				    unsigned long order, unsigned long asid)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
+	data.asid = asid;
+	data.addr = gva;
+	data.size = gvsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+				    unsigned long hbase, unsigned long hmask,
+				    unsigned long asid)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
+	data.asid = asid;
+	data.addr = data.size = data.order = 0;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask,
+			       unsigned long gva, unsigned long gvsz,
+			       unsigned long order)
+{
+	struct kvm_riscv_hfence data;
+
+	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
+	data.asid = 0;
+	data.addr = gva;
+	data.size = gvsz;
+	data.order = order;
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
+			    KVM_REQ_HFENCE_VVMA_ALL, &data);
+}
+
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+			       unsigned long hbase, unsigned long hmask)
+{
+	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
+			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
+}
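
How the pieces fit together: the kvm_riscv_fence_i(), kvm_riscv_hfence_gvma_*() and
kvm_riscv_hfence_vvma_*() helpers above enqueue an hfence descriptor on each targeted
VCPU (bit i of hmask selects vcpu_id hbase + i; hbase == -1UL targets every VCPU) and
then raise a VCPU request, so each target runs the matching *_process() handler before
it next enters the guest. If a VCPU's fixed-size hfence queue is full, the request is
widened to the coarser fallback (flush-all) variant rather than being dropped.

A minimal sketch of a caller, for illustration only: the function name is hypothetical,
and it assumes hbase/hmask have already been decoded from a guest hart mask and that
flushes are done at page granularity (order == PAGE_SHIFT), as an SBI RFENCE-style
handler would typically do.

	/* Illustrative only; prototypes come in via asm/kvm_host.h. */
	#include <linux/kvm_host.h>

	static void example_remote_vvma_fence(struct kvm_vcpu *vcpu,
					      unsigned long hbase,
					      unsigned long hmask,
					      unsigned long gva,
					      unsigned long gvsz)
	{
		if (!gvsz)
			/* No range supplied: flush all guest-virtual mappings. */
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			/* Flush the page-sized blocks covering [gva, gva + gvsz). */
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  gva, gvsz, PAGE_SHIFT);
	}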