@@ -140,6 +140,14 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 
 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
 
+#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
+        RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
+        RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
+        RTIT_STATUS_BYTECNT))
+
+#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \
+        (~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f)
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap: upper bound on the amount of time between two successive
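These two masks define the reserved bits that the WRMSR emulation below refuses to let a guest set. IA32_RTIT_OUTPUT_BASE holds a physical address, so every bit at or above the guest's MAXPHYADDR is reserved, as are bits 6:0 (the 0x7f term; the output base must be 128-byte aligned). A minimal userspace sketch of that arithmetic, not kernel code; the MAXPHYADDR of 36 and the sample addresses are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirror of MSR_IA32_RTIT_OUTPUT_BASE_MASK: reserve bits >= maxphyaddr
 * and the low seven bits. */
static uint64_t output_base_mask(unsigned int maxphyaddr)
{
        return ~((1ULL << maxphyaddr) - 1) | 0x7f;
}

int main(void)
{
        uint64_t mask = output_base_mask(36);   /* hypothetical CPUID value */

        /* 128-byte-aligned base below 2^36: no reserved bit set, accepted. */
        printf("%d\n", (0x12345680ULL & mask) == 0);    /* prints 1 */
        /* Misaligned base: bit 4 falls inside the reserved low bits. */
        printf("%d\n", (0x12345610ULL & mask) != 0);    /* prints 1 */
        /* Base above 2^36: reserved high bits set, rejected. */
        printf("%d\n", ((1ULL << 40) & mask) != 0);     /* prints 1 */
        return 0;
}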
@@ -1354,6 +1362,79 @@ void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
 }
 
+static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
+{
+        struct vcpu_vmx *vmx = to_vmx(vcpu);
+        unsigned long value;
+
+        /*
+         * Any MSR write that attempts to change bits marked reserved will
+         * cause a #GP fault.
+         */
+        if (data & vmx->pt_desc.ctl_bitmask)
+                return 1;
+
+        /*
+         * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
+         * result in a #GP unless the same write also clears TraceEn.
+         */
+        if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
+                ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
+                return 1;
+
+        /*
+         * Any WRMSR to IA32_RTIT_CTL that sets TraceEn but clears both ToPA
+         * and FabricEn would cause #GP, if
+         * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
+         */
+        if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
+                !(data & RTIT_CTL_FABRIC_EN) &&
+                !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                PT_CAP_single_range_output))
+                return 1;
+
+        /*
+         * MTCFreq, CycThresh and PSBFreq encodings check: any MSR write that
+         * utilizes an encoding marked reserved will cause a #GP fault.
+         */
+        value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
+        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
+                !test_bit((data & RTIT_CTL_MTC_RANGE) >>
+                        RTIT_CTL_MTC_RANGE_OFFSET, &value))
+                return 1;
+        value = intel_pt_validate_cap(vmx->pt_desc.caps,
+                        PT_CAP_cycle_thresholds);
+        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
+                !test_bit((data & RTIT_CTL_CYC_THRESH) >>
+                        RTIT_CTL_CYC_THRESH_OFFSET, &value))
+                return 1;
+        value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
+        if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
+                !test_bit((data & RTIT_CTL_PSB_FREQ) >>
+                        RTIT_CTL_PSB_FREQ_OFFSET, &value))
+                return 1;
+
+        /*
+         * Any ADDRx_CFG field that is reserved or uses an encoding
+         * greater than 2 will cause a #GP fault.
+         */
+        value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
+        if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
+                return 1;
+        value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
+        if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
+                return 1;
+        value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
+        if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
+                return 1;
+        value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
+        if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
+                return 1;
+
+        return 0;
+}
+
+
 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
 	unsigned long rip;
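The three frequency checks in vmx_rtit_ctl_check() share one pattern: intel_pt_validate_cap() returns a bitmap of supported encodings reported by CPUID leaf 14H, and test_bit() on the requested encoding decides whether the write is legal. A standalone sketch of that pattern; the bitmap value 0x249 is made up, and plain shifts stand in for the kernel's test_bit():

#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_MTC_RANGE_OFFSET       14      /* MTCFreq is IA32_RTIT_CTL[17:14] */
#define RTIT_CTL_MTC_RANGE              (0xfULL << RTIT_CTL_MTC_RANGE_OFFSET)

/* Returns 1 (reject, #GP) when the requested MTCFreq encoding is not in
 * the supported-periods bitmap, mirroring the !test_bit() check above. */
static int mtc_freq_invalid(uint64_t data, uint32_t supported_periods)
{
        unsigned int encoding = (data & RTIT_CTL_MTC_RANGE) >>
                                RTIT_CTL_MTC_RANGE_OFFSET;

        return !((supported_periods >> encoding) & 1);
}

int main(void)
{
        uint32_t periods = 0x249;       /* hypothetical: encodings 0, 3, 6, 9 */

        printf("%d\n", mtc_freq_invalid(3ULL << 14, periods));  /* 0: allowed */
        printf("%d\n", mtc_freq_invalid(4ULL << 14, periods));  /* 1: #GP */
        return 0;
}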
@@ -1555,6 +1636,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct shared_msr_entry *msr;
+	u32 index;
 
 	switch (msr_info->index) {
 #ifdef CONFIG_X86_64
@@ -1619,6 +1701,52 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		msr_info->data = vcpu->arch.ia32_xss;
 		break;
+        case MSR_IA32_RTIT_CTL:
+                if (pt_mode != PT_MODE_HOST_GUEST)
+                        return 1;
+                msr_info->data = vmx->pt_desc.guest.ctl;
+                break;
+        case MSR_IA32_RTIT_STATUS:
+                if (pt_mode != PT_MODE_HOST_GUEST)
+                        return 1;
+                msr_info->data = vmx->pt_desc.guest.status;
+                break;
+        case MSR_IA32_RTIT_CR3_MATCH:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_cr3_filtering))
+                        return 1;
+                msr_info->data = vmx->pt_desc.guest.cr3_match;
+                break;
+        case MSR_IA32_RTIT_OUTPUT_BASE:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (!intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_topa_output) &&
+                         !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_single_range_output)))
+                        return 1;
+                msr_info->data = vmx->pt_desc.guest.output_base;
+                break;
+        case MSR_IA32_RTIT_OUTPUT_MASK:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (!intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_topa_output) &&
+                         !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_single_range_output)))
+                        return 1;
+                msr_info->data = vmx->pt_desc.guest.output_mask;
+                break;
+        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+                index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_num_address_ranges)))
+                        return 1;
+                if (index % 2)
+                        msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
+                else
+                        msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
+                break;
 	case MSR_TSC_AUX:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
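The ADDR0_A ... ADDR3_B range case works because the eight address-filter MSRs are architecturally consecutive (0x580 through 0x587 per the SDM), so the offset from ADDR0_A encodes both the range number (index / 2) and which end of the range is touched (index % 2). A quick userspace demonstration of that mapping, using the raw MSR numbers in place of the kernel's constants:

#include <stdio.h>

int main(void)
{
        /* IA32_RTIT_ADDR0_A..ADDR3_B occupy MSRs 0x580..0x587. */
        const unsigned int MSR_IA32_RTIT_ADDR0_A = 0x580;

        for (unsigned int msr = 0x580; msr <= 0x587; msr++) {
                unsigned int index = msr - MSR_IA32_RTIT_ADDR0_A;

                /* Even offsets are the A (start) MSR, odd the B (end) MSR. */
                printf("MSR %#x -> addr_%c[%u]\n", msr,
                       (index % 2) ? 'b' : 'a', index / 2);
        }
        return 0;
}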
@@ -1648,6 +1776,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	int ret = 0;
 	u32 msr_index = msr_info->index;
 	u64 data = msr_info->data;
+	u32 index;
 
 	switch (msr_index) {
 	case MSR_EFER:
@@ -1799,6 +1928,61 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		else
 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
 		break;
+        case MSR_IA32_RTIT_CTL:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        vmx_rtit_ctl_check(vcpu, data))
+                        return 1;
+                vmcs_write64(GUEST_IA32_RTIT_CTL, data);
+                vmx->pt_desc.guest.ctl = data;
+                break;
+        case MSR_IA32_RTIT_STATUS:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+                        (data & MSR_IA32_RTIT_STATUS_MASK))
+                        return 1;
+                vmx->pt_desc.guest.status = data;
+                break;
+        case MSR_IA32_RTIT_CR3_MATCH:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+                        !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_cr3_filtering))
+                        return 1;
+                vmx->pt_desc.guest.cr3_match = data;
+                break;
+        case MSR_IA32_RTIT_OUTPUT_BASE:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+                        (!intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_topa_output) &&
+                         !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_single_range_output)) ||
+                        (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK))
+                        return 1;
+                vmx->pt_desc.guest.output_base = data;
+                break;
+        case MSR_IA32_RTIT_OUTPUT_MASK:
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+                        (!intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_topa_output) &&
+                         !intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_single_range_output)))
+                        return 1;
+                vmx->pt_desc.guest.output_mask = data;
+                break;
+        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+                index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+                if ((pt_mode != PT_MODE_HOST_GUEST) ||
+                        (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+                        (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
+                                        PT_CAP_num_address_ranges)))
+                        return 1;
+                if (index % 2)
+                        vmx->pt_desc.guest.addr_b[index / 2] = data;
+                else
+                        vmx->pt_desc.guest.addr_a[index / 2] = data;
+                break;
 	case MSR_TSC_AUX:
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
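One subtlety in the MSR_IA32_RTIT_CTL path above: vmx_rtit_ctl_check() rejects any write that changes a non-TraceEn bit while TraceEn is currently set, so a guest must stop tracing before reconfiguring. A self-contained sketch of just that rule, with the xor/mask test copied from the patch:

#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_TRACEEN        (1ULL << 0)     /* IA32_RTIT_CTL.TraceEn */

/* Nonzero (reject, #GP) when TraceEn is set and the write touches any
 * other bit, mirroring the second check in vmx_rtit_ctl_check(). */
static int traceen_rule_violated(uint64_t old_ctl, uint64_t new_ctl)
{
        return (old_ctl & RTIT_CTL_TRACEEN) &&
               ((old_ctl ^ new_ctl) & ~RTIT_CTL_TRACEEN);
}

int main(void)
{
        uint64_t tracing = RTIT_CTL_TRACEEN | (1ULL << 2);      /* tracing on */

        /* Clearing TraceEn alone, leaving other bits intact: allowed. */
        printf("%d\n", traceen_rule_violated(tracing, 1ULL << 2));        /* 0 */
        /* Changing another bit while TraceEn stays set: #GP. */
        printf("%d\n", traceen_rule_violated(tracing, RTIT_CTL_TRACEEN)); /* 1 */
        return 0;
}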