Skip to content

Commit 020df07

Browse files
Gleb Natapov authored and avikivity committed
KVM: move DR register access handling into generic code
Currently both SVM and VMX have their own DR handling code. Move it to x86.c.

Acked-by: Jan Kiszka <[email protected]>
Signed-off-by: Gleb Natapov <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
1 parent 6bc31bd commit 020df07

File tree

4 files changed

+93
-134
lines changed

4 files changed

+93
-134
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -496,8 +496,7 @@ struct kvm_x86_ops {
496496
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
497497
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
498498
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
499-
int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
500-
int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
499+
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
501500
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
502501
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
503502
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -602,6 +601,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
602601
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
603602
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
604603
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
604+
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
605+
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
605606
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
606607
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
607608
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

arch/x86/kvm/svm.c

Lines changed: 3 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -1307,70 +1307,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
13071307
svm->vmcb->control.asid = sd->next_asid++;
13081308
}
13091309

1310-
static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
1310+
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
13111311
{
13121312
struct vcpu_svm *svm = to_svm(vcpu);
13131313

1314-
switch (dr) {
1315-
case 0 ... 3:
1316-
*dest = vcpu->arch.db[dr];
1317-
break;
1318-
case 4:
1319-
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
1320-
return EMULATE_FAIL; /* will re-inject UD */
1321-
/* fall through */
1322-
case 6:
1323-
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1324-
*dest = vcpu->arch.dr6;
1325-
else
1326-
*dest = svm->vmcb->save.dr6;
1327-
break;
1328-
case 5:
1329-
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
1330-
return EMULATE_FAIL; /* will re-inject UD */
1331-
/* fall through */
1332-
case 7:
1333-
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1334-
*dest = vcpu->arch.dr7;
1335-
else
1336-
*dest = svm->vmcb->save.dr7;
1337-
break;
1338-
}
1339-
1340-
return EMULATE_DONE;
1341-
}
1342-
1343-
static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
1344-
{
1345-
struct vcpu_svm *svm = to_svm(vcpu);
1346-
1347-
switch (dr) {
1348-
case 0 ... 3:
1349-
vcpu->arch.db[dr] = value;
1350-
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1351-
vcpu->arch.eff_db[dr] = value;
1352-
break;
1353-
case 4:
1354-
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
1355-
return EMULATE_FAIL; /* will re-inject UD */
1356-
/* fall through */
1357-
case 6:
1358-
vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
1359-
break;
1360-
case 5:
1361-
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
1362-
return EMULATE_FAIL; /* will re-inject UD */
1363-
/* fall through */
1364-
case 7:
1365-
vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
1366-
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1367-
svm->vmcb->save.dr7 = vcpu->arch.dr7;
1368-
vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
1369-
}
1370-
break;
1371-
}
1372-
1373-
return EMULATE_DONE;
1314+
svm->vmcb->save.dr7 = value;
13741315
}
13751316

13761317
static int pf_interception(struct vcpu_svm *svm)
@@ -3302,8 +3243,7 @@ static struct kvm_x86_ops svm_x86_ops = {
33023243
.set_idt = svm_set_idt,
33033244
.get_gdt = svm_get_gdt,
33043245
.set_gdt = svm_set_gdt,
3305-
.get_dr = svm_get_dr,
3306-
.set_dr = svm_set_dr,
3246+
.set_dr7 = svm_set_dr7,
33073247
.cache_reg = svm_cache_reg,
33083248
.get_rflags = svm_get_rflags,
33093249
.set_rflags = svm_set_rflags,

arch/x86/kvm/vmx.c

Lines changed: 11 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -3089,19 +3089,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
30893089
return 0;
30903090
}
30913091

3092-
static int check_dr_alias(struct kvm_vcpu *vcpu)
3093-
{
3094-
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
3095-
kvm_queue_exception(vcpu, UD_VECTOR);
3096-
return -1;
3097-
}
3098-
return 0;
3099-
}
3100-
31013092
static int handle_dr(struct kvm_vcpu *vcpu)
31023093
{
31033094
unsigned long exit_qualification;
3104-
unsigned long val;
31053095
int dr, reg;
31063096

31073097
/* Do not handle if the CPL > 0, will trigger GP on re-entry */
@@ -3136,67 +3126,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
31363126
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
31373127
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
31383128
if (exit_qualification & TYPE_MOV_FROM_DR) {
3139-
switch (dr) {
3140-
case 0 ... 3:
3141-
val = vcpu->arch.db[dr];
3142-
break;
3143-
case 4:
3144-
if (check_dr_alias(vcpu) < 0)
3145-
return 1;
3146-
/* fall through */
3147-
case 6:
3148-
val = vcpu->arch.dr6;
3149-
break;
3150-
case 5:
3151-
if (check_dr_alias(vcpu) < 0)
3152-
return 1;
3153-
/* fall through */
3154-
default: /* 7 */
3155-
val = vcpu->arch.dr7;
3156-
break;
3157-
}
3158-
kvm_register_write(vcpu, reg, val);
3159-
} else {
3160-
val = vcpu->arch.regs[reg];
3161-
switch (dr) {
3162-
case 0 ... 3:
3163-
vcpu->arch.db[dr] = val;
3164-
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
3165-
vcpu->arch.eff_db[dr] = val;
3166-
break;
3167-
case 4:
3168-
if (check_dr_alias(vcpu) < 0)
3169-
return 1;
3170-
/* fall through */
3171-
case 6:
3172-
if (val & 0xffffffff00000000ULL) {
3173-
kvm_inject_gp(vcpu, 0);
3174-
return 1;
3175-
}
3176-
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
3177-
break;
3178-
case 5:
3179-
if (check_dr_alias(vcpu) < 0)
3180-
return 1;
3181-
/* fall through */
3182-
default: /* 7 */
3183-
if (val & 0xffffffff00000000ULL) {
3184-
kvm_inject_gp(vcpu, 0);
3185-
return 1;
3186-
}
3187-
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
3188-
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
3189-
vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3190-
vcpu->arch.switch_db_regs =
3191-
(val & DR7_BP_EN_MASK);
3192-
}
3193-
break;
3194-
}
3195-
}
3129+
unsigned long val;
3130+
if (!kvm_get_dr(vcpu, dr, &val))
3131+
kvm_register_write(vcpu, reg, val);
3132+
} else
3133+
kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
31963134
skip_emulated_instruction(vcpu);
31973135
return 1;
31983136
}
31993137

3138+
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
3139+
{
3140+
vmcs_writel(GUEST_DR7, val);
3141+
}
3142+
32003143
static int handle_cpuid(struct kvm_vcpu *vcpu)
32013144
{
32023145
kvm_emulate_cpuid(vcpu);
@@ -4187,6 +4130,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
41874130
.set_idt = vmx_set_idt,
41884131
.get_gdt = vmx_get_gdt,
41894132
.set_gdt = vmx_set_gdt,
4133+
.set_dr7 = vmx_set_dr7,
41904134
.cache_reg = vmx_cache_reg,
41914135
.get_rflags = vmx_get_rflags,
41924136
.set_rflags = vmx_set_rflags,

arch/x86/kvm/x86.c

Lines changed: 76 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -562,6 +562,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
562562
}
563563
EXPORT_SYMBOL_GPL(kvm_get_cr8);
564564

565+
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
566+
{
567+
switch (dr) {
568+
case 0 ... 3:
569+
vcpu->arch.db[dr] = val;
570+
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
571+
vcpu->arch.eff_db[dr] = val;
572+
break;
573+
case 4:
574+
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
575+
kvm_queue_exception(vcpu, UD_VECTOR);
576+
return 1;
577+
}
578+
/* fall through */
579+
case 6:
580+
if (val & 0xffffffff00000000ULL) {
581+
kvm_inject_gp(vcpu, 0);
582+
return 1;
583+
}
584+
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
585+
break;
586+
case 5:
587+
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
588+
kvm_queue_exception(vcpu, UD_VECTOR);
589+
return 1;
590+
}
591+
/* fall through */
592+
default: /* 7 */
593+
if (val & 0xffffffff00000000ULL) {
594+
kvm_inject_gp(vcpu, 0);
595+
return 1;
596+
}
597+
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
598+
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
599+
kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
600+
vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
601+
}
602+
break;
603+
}
604+
605+
return 0;
606+
}
607+
EXPORT_SYMBOL_GPL(kvm_set_dr);
608+
609+
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
610+
{
611+
switch (dr) {
612+
case 0 ... 3:
613+
*val = vcpu->arch.db[dr];
614+
break;
615+
case 4:
616+
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
617+
kvm_queue_exception(vcpu, UD_VECTOR);
618+
return 1;
619+
}
620+
/* fall through */
621+
case 6:
622+
*val = vcpu->arch.dr6;
623+
break;
624+
case 5:
625+
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) {
626+
kvm_queue_exception(vcpu, UD_VECTOR);
627+
return 1;
628+
}
629+
/* fall through */
630+
default: /* 7 */
631+
*val = vcpu->arch.dr7;
632+
break;
633+
}
634+
635+
return 0;
636+
}
637+
EXPORT_SYMBOL_GPL(kvm_get_dr);
638+
565639
static inline u32 bit(int bitno)
566640
{
567641
return 1 << (bitno & 31);
@@ -3483,14 +3557,14 @@ int emulate_clts(struct kvm_vcpu *vcpu)
34833557

34843558
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
34853559
{
3486-
return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
3560+
return kvm_get_dr(ctxt->vcpu, dr, dest);
34873561
}
34883562

34893563
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
34903564
{
34913565
unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
34923566

3493-
return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
3567+
return kvm_set_dr(ctxt->vcpu, dr, value & mask);
34943568
}
34953569

34963570
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)

0 commit comments

Comments (0)