Skip to content

Commit 6a29b51

Browse files
committed
Merge tag 'kvm-arm-for-4.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
KVM/ARM updates for v4.11-rc2 vgic updates: - Honour disabling the ITS - Don't deadlock when deactivating own interrupts via MMIO - Correctly expose the lack of IRQ/FIQ bypass on GICv3 I/O virtualization: - Make KVM_CAP_NR_MEMSLOTS big enough for large guests with many PCIe devices General bug fixes: - Gracefully handle exceptions generated with syndromes that the host doesn't understand - Properly invalidate TLBs on VHE systems
2 parents 05d8d34 + 955a3fc commit 6a29b51

File tree

12 files changed

+183
-79
lines changed

12 files changed

+183
-79
lines changed

Documentation/virtual/kvm/api.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
951951
slot. When changing an existing slot, it may be moved in the guest
952952
physical memory space, or its flags may be modified. It may not be
953953
resized. Slots may not overlap in guest physical address space.
954+
Bits 0-15 of "slot" specifies the slot id and this value should be
955+
less than the maximum number of user memory slots supported per VM.
956+
The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
957+
if this capability is supported by the architecture.
954958

955959
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
956960
specifies the address space which is being modified. They must be

arch/arm/include/asm/kvm_arm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -209,6 +209,7 @@
209209
#define HSR_EC_IABT_HYP (0x21)
210210
#define HSR_EC_DABT (0x24)
211211
#define HSR_EC_DABT_HYP (0x25)
212+
#define HSR_EC_MAX (0x3f)
212213

213214
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
214215

arch/arm/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
3131

3232
#define KVM_USER_MEM_SLOTS 32
33-
#define KVM_PRIVATE_MEM_SLOTS 4
3433
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
3534
#define KVM_HAVE_ONE_REG
3635
#define KVM_HALT_POLL_NS_DEFAULT 500000

arch/arm/kvm/arm.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
221221
case KVM_CAP_MAX_VCPUS:
222222
r = KVM_MAX_VCPUS;
223223
break;
224+
case KVM_CAP_NR_MEMSLOTS:
225+
r = KVM_USER_MEM_SLOTS;
226+
break;
224227
case KVM_CAP_MSI_DEVID:
225228
if (!kvm)
226229
r = -EINVAL;

arch/arm/kvm/handle_exit.c

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
7979
return 1;
8080
}
8181

82+
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
83+
{
84+
u32 hsr = kvm_vcpu_get_hsr(vcpu);
85+
86+
kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
87+
hsr);
88+
89+
kvm_inject_undefined(vcpu);
90+
return 1;
91+
}
92+
8293
static exit_handle_fn arm_exit_handlers[] = {
94+
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
8395
[HSR_EC_WFI] = kvm_handle_wfx,
8496
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
8597
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
98110
{
99111
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
100112

101-
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
102-
!arm_exit_handlers[hsr_ec]) {
103-
kvm_err("Unknown exception class: hsr: %#08x\n",
104-
(unsigned int)kvm_vcpu_get_hsr(vcpu));
105-
BUG();
106-
}
107-
108113
return arm_exit_handlers[hsr_ec];
109114
}
110115

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,7 @@
3030

3131
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
3232

33-
#define KVM_USER_MEM_SLOTS 32
34-
#define KVM_PRIVATE_MEM_SLOTS 4
33+
#define KVM_USER_MEM_SLOTS 512
3534
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
3635
#define KVM_HALT_POLL_NS_DEFAULT 500000
3736

arch/arm64/kvm/handle_exit.c

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
135135
return ret;
136136
}
137137

138+
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
139+
{
140+
u32 hsr = kvm_vcpu_get_hsr(vcpu);
141+
142+
kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
143+
hsr, esr_get_class_string(hsr));
144+
145+
kvm_inject_undefined(vcpu);
146+
return 1;
147+
}
148+
138149
static exit_handle_fn arm_exit_handlers[] = {
150+
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
139151
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
140152
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
141153
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
162174
u32 hsr = kvm_vcpu_get_hsr(vcpu);
163175
u8 hsr_ec = ESR_ELx_EC(hsr);
164176

165-
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
166-
!arm_exit_handlers[hsr_ec]) {
167-
kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
168-
hsr, esr_get_class_string(hsr));
169-
BUG();
170-
}
171-
172177
return arm_exit_handlers[hsr_ec];
173178
}
174179

arch/arm64/kvm/hyp/tlb.c

Lines changed: 55 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,62 @@
1818
#include <asm/kvm_hyp.h>
1919
#include <asm/tlbflush.h>
2020

21+
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
22+
{
23+
u64 val;
24+
25+
/*
26+
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
27+
* most TLB operations target EL2/EL0. In order to affect the
28+
* guest TLBs (EL1/EL0), we need to change one of these two
29+
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
30+
* let's flip TGE before executing the TLB operation.
31+
*/
32+
write_sysreg(kvm->arch.vttbr, vttbr_el2);
33+
val = read_sysreg(hcr_el2);
34+
val &= ~HCR_TGE;
35+
write_sysreg(val, hcr_el2);
36+
isb();
37+
}
38+
39+
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
40+
{
41+
write_sysreg(kvm->arch.vttbr, vttbr_el2);
42+
isb();
43+
}
44+
45+
static hyp_alternate_select(__tlb_switch_to_guest,
46+
__tlb_switch_to_guest_nvhe,
47+
__tlb_switch_to_guest_vhe,
48+
ARM64_HAS_VIRT_HOST_EXTN);
49+
50+
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
51+
{
52+
/*
53+
* We're done with the TLB operation, let's restore the host's
54+
* view of HCR_EL2.
55+
*/
56+
write_sysreg(0, vttbr_el2);
57+
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
58+
}
59+
60+
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
61+
{
62+
write_sysreg(0, vttbr_el2);
63+
}
64+
65+
static hyp_alternate_select(__tlb_switch_to_host,
66+
__tlb_switch_to_host_nvhe,
67+
__tlb_switch_to_host_vhe,
68+
ARM64_HAS_VIRT_HOST_EXTN);
69+
2170
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
2271
{
2372
dsb(ishst);
2473

2574
/* Switch to requested VMID */
2675
kvm = kern_hyp_va(kvm);
27-
write_sysreg(kvm->arch.vttbr, vttbr_el2);
28-
isb();
76+
__tlb_switch_to_guest()(kvm);
2977

3078
/*
3179
* We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
4694
dsb(ish);
4795
isb();
4896

49-
write_sysreg(0, vttbr_el2);
97+
__tlb_switch_to_host()(kvm);
5098
}
5199

52100
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,29 +103,27 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
55103

56104
/* Switch to requested VMID */
57105
kvm = kern_hyp_va(kvm);
58-
write_sysreg(kvm->arch.vttbr, vttbr_el2);
59-
isb();
106+
__tlb_switch_to_guest()(kvm);
60107

61108
__tlbi(vmalls12e1is);
62109
dsb(ish);
63110
isb();
64111

65-
write_sysreg(0, vttbr_el2);
112+
__tlb_switch_to_host()(kvm);
66113
}
67114

68115
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
69116
{
70117
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
71118

72119
/* Switch to requested VMID */
73-
write_sysreg(kvm->arch.vttbr, vttbr_el2);
74-
isb();
120+
__tlb_switch_to_guest()(kvm);
75121

76122
__tlbi(vmalle1);
77123
dsb(nsh);
78124
isb();
79125

80-
write_sysreg(0, vttbr_el2);
126+
__tlb_switch_to_host()(kvm);
81127
}
82128

83129
void __hyp_text __kvm_flush_vm_context(void)

include/linux/irqchip/arm-gic-v3.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,8 @@
373373
#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT)
374374
#define ICC_IGRPEN1_EL1_SHIFT 0
375375
#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT)
376+
#define ICC_SRE_EL1_DIB (1U << 2)
377+
#define ICC_SRE_EL1_DFB (1U << 1)
376378
#define ICC_SRE_EL1_SRE (1U << 0)
377379

378380
/*

virt/kvm/arm/vgic/vgic-its.c

Lines changed: 65 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
360360
return ret;
361361
}
362362

363-
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
364-
struct vgic_its *its,
365-
gpa_t addr, unsigned int len)
366-
{
367-
u32 reg = 0;
368-
369-
mutex_lock(&its->cmd_lock);
370-
if (its->creadr == its->cwriter)
371-
reg |= GITS_CTLR_QUIESCENT;
372-
if (its->enabled)
373-
reg |= GITS_CTLR_ENABLE;
374-
mutex_unlock(&its->cmd_lock);
375-
376-
return reg;
377-
}
378-
379-
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
380-
gpa_t addr, unsigned int len,
381-
unsigned long val)
382-
{
383-
its->enabled = !!(val & GITS_CTLR_ENABLE);
384-
}
385-
386363
static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
387364
struct vgic_its *its,
388365
gpa_t addr, unsigned int len)
@@ -1161,33 +1138,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
11611138
#define ITS_CMD_SIZE 32
11621139
#define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
11631140

1164-
/*
1165-
* By writing to CWRITER the guest announces new commands to be processed.
1166-
* To avoid any races in the first place, we take the its_cmd lock, which
1167-
* protects our ring buffer variables, so that there is only one user
1168-
* per ITS handling commands at a given time.
1169-
*/
1170-
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1171-
gpa_t addr, unsigned int len,
1172-
unsigned long val)
1141+
/* Must be called with the cmd_lock held. */
1142+
static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
11731143
{
11741144
gpa_t cbaser;
11751145
u64 cmd_buf[4];
1176-
u32 reg;
11771146

1178-
if (!its)
1179-
return;
1180-
1181-
mutex_lock(&its->cmd_lock);
1182-
1183-
reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1184-
reg = ITS_CMD_OFFSET(reg);
1185-
if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1186-
mutex_unlock(&its->cmd_lock);
1147+
/* Commands are only processed when the ITS is enabled. */
1148+
if (!its->enabled)
11871149
return;
1188-
}
11891150

1190-
its->cwriter = reg;
11911151
cbaser = CBASER_ADDRESS(its->cbaser);
11921152

11931153
while (its->cwriter != its->creadr) {
@@ -1207,6 +1167,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
12071167
if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
12081168
its->creadr = 0;
12091169
}
1170+
}
1171+
1172+
/*
1173+
* By writing to CWRITER the guest announces new commands to be processed.
1174+
* To avoid any races in the first place, we take the its_cmd lock, which
1175+
* protects our ring buffer variables, so that there is only one user
1176+
* per ITS handling commands at a given time.
1177+
*/
1178+
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1179+
gpa_t addr, unsigned int len,
1180+
unsigned long val)
1181+
{
1182+
u64 reg;
1183+
1184+
if (!its)
1185+
return;
1186+
1187+
mutex_lock(&its->cmd_lock);
1188+
1189+
reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1190+
reg = ITS_CMD_OFFSET(reg);
1191+
if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1192+
mutex_unlock(&its->cmd_lock);
1193+
return;
1194+
}
1195+
its->cwriter = reg;
1196+
1197+
vgic_its_process_commands(kvm, its);
12101198

12111199
mutex_unlock(&its->cmd_lock);
12121200
}
@@ -1287,6 +1275,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
12871275
*regptr = reg;
12881276
}
12891277

1278+
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
1279+
struct vgic_its *its,
1280+
gpa_t addr, unsigned int len)
1281+
{
1282+
u32 reg = 0;
1283+
1284+
mutex_lock(&its->cmd_lock);
1285+
if (its->creadr == its->cwriter)
1286+
reg |= GITS_CTLR_QUIESCENT;
1287+
if (its->enabled)
1288+
reg |= GITS_CTLR_ENABLE;
1289+
mutex_unlock(&its->cmd_lock);
1290+
1291+
return reg;
1292+
}
1293+
1294+
static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1295+
gpa_t addr, unsigned int len,
1296+
unsigned long val)
1297+
{
1298+
mutex_lock(&its->cmd_lock);
1299+
1300+
its->enabled = !!(val & GITS_CTLR_ENABLE);
1301+
1302+
/*
1303+
* Try to process any pending commands. This function bails out early
1304+
* if the ITS is disabled or no commands have been queued.
1305+
*/
1306+
vgic_its_process_commands(kvm, its);
1307+
1308+
mutex_unlock(&its->cmd_lock);
1309+
}
1310+
12901311
#define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
12911312
{ \
12921313
.reg_offset = off, \

0 commit comments

Comments
 (0)