Skip to content

Commit e27b72d

Browse files
Boris Ostrovsky authored and David Vrabel committed
xen/PMU: Describe vendor-specific PMU registers
AMD and Intel PMU register initialization and helpers that determine whether a register belongs to PMU. This and some of subsequent PMU emulation code is somewhat similar to Xen's PMU implementation. Signed-off-by: Boris Ostrovsky <[email protected]> Reviewed-by: David Vrabel <[email protected]> Signed-off-by: David Vrabel <[email protected]>
1 parent 65d0cf0 commit e27b72d

File tree

1 file changed

+152
-1
lines changed

1 file changed

+152
-1
lines changed

arch/x86/xen/pmu.c

Lines changed: 152 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,155 @@
1818
/* Per-CPU page shared with the hypervisor, holding PMU state for that CPU. */
static DEFINE_PER_CPU(struct xen_pmu_data *, xenpmu_shared);
#define get_xenpmu_data() per_cpu(xenpmu_shared, smp_processor_id())

/* AMD PMU */
#define F15H_NUM_COUNTERS 6	/* Family 15h provides six core counters */
#define F10H_NUM_COUNTERS 4	/* Family 10h (and similar) provide four */

/* Filled in by xen_pmu_arch_init() on AMD; describe the counter MSR layout. */
static __read_mostly uint32_t amd_counters_base;	/* first counter MSR */
static __read_mostly uint32_t amd_ctrls_base;		/* first event-select MSR */
static __read_mostly int amd_msr_step;	/* MSR stride (2 when ctl/ctr interleave) */
static __read_mostly int k7_counters_mirrored;	/* fam15h mirrors legacy K7 MSRs */
static __read_mostly int amd_num_counters;

/* Intel PMU */
/* MSR classification values returned via *type from is_intel_pmu_msr(). */
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
#define MSR_TYPE_GLOBAL 2
#define MSR_TYPE_ARCH_COUNTER 3
#define MSR_TYPE_ARCH_CTRL 4

/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */
#define PMU_GENERAL_NR_SHIFT 8
#define PMU_GENERAL_NR_BITS 8
#define PMU_GENERAL_NR_MASK (((1 << PMU_GENERAL_NR_BITS) - 1) \
				<< PMU_GENERAL_NR_SHIFT)

/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */
#define PMU_FIXED_NR_SHIFT 0
#define PMU_FIXED_NR_BITS 5
#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) - 1) \
				<< PMU_FIXED_NR_SHIFT)

/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))

/* Filled in by xen_pmu_arch_init() on Intel from CPUID leaf 0xa. */
static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
57+
static void xen_pmu_arch_init(void)
58+
{
59+
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
60+
61+
switch (boot_cpu_data.x86) {
62+
case 0x15:
63+
amd_num_counters = F15H_NUM_COUNTERS;
64+
amd_counters_base = MSR_F15H_PERF_CTR;
65+
amd_ctrls_base = MSR_F15H_PERF_CTL;
66+
amd_msr_step = 2;
67+
k7_counters_mirrored = 1;
68+
break;
69+
case 0x10:
70+
case 0x12:
71+
case 0x14:
72+
case 0x16:
73+
default:
74+
amd_num_counters = F10H_NUM_COUNTERS;
75+
amd_counters_base = MSR_K7_PERFCTR0;
76+
amd_ctrls_base = MSR_K7_EVNTSEL0;
77+
amd_msr_step = 1;
78+
k7_counters_mirrored = 0;
79+
break;
80+
}
81+
} else {
82+
uint32_t eax, ebx, ecx, edx;
83+
84+
cpuid(0xa, &eax, &ebx, &ecx, &edx);
85+
86+
intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >>
87+
PMU_GENERAL_NR_SHIFT;
88+
intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >>
89+
PMU_FIXED_NR_SHIFT;
90+
}
91+
}
92+
93+
/*
 * Translate a legacy K7 perf MSR to its family-15h equivalent.
 *
 * The four K7 event-select MSRs (0xc0010000-0xc0010003) and the four
 * K7 counter MSRs (0xc0010004-0xc0010007) are architecturally contiguous,
 * so a simple range check is equivalent to enumerating each MSR.
 * Any other address is returned unchanged.
 */
static inline uint32_t get_fam15h_addr(u32 addr)
{
	if (addr >= MSR_K7_PERFCTR0 && addr <= MSR_K7_PERFCTR3)
		return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0);

	if (addr >= MSR_K7_EVNTSEL0 && addr <= MSR_K7_EVNTSEL3)
		return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0);

	return addr;
}
112+
113+
static inline bool is_amd_pmu_msr(unsigned int msr)
114+
{
115+
if ((msr >= MSR_F15H_PERF_CTL &&
116+
msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
117+
(msr >= MSR_K7_EVNTSEL0 &&
118+
msr < MSR_K7_PERFCTR0 + amd_num_counters))
119+
return true;
120+
121+
return false;
122+
}
123+
124+
/*
 * Classify @msr_index as an Intel PMU register.
 *
 * Returns true if it is one, storing the register class in *type
 * (MSR_TYPE_*) and, for counters and event selects, the counter number
 * in *index. Returns false (leaving *type/*index untouched) otherwise.
 */
static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
{
	u32 pmc_index;

	/* Fixed-function control and global control/status registers. */
	switch (msr_index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_IA32_DS_AREA:
	case MSR_IA32_PEBS_ENABLE:
		*type = MSR_TYPE_CTRL;
		return true;

	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*type = MSR_TYPE_GLOBAL;
		return true;
	}

	/* Fixed-function counters. */
	if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) &&
	    (msr_index < MSR_CORE_PERF_FIXED_CTR0 + intel_num_fixed_counters)) {
		*index = msr_index - MSR_CORE_PERF_FIXED_CTR0;
		*type = MSR_TYPE_COUNTER;
		return true;
	}

	/* Architectural event-select registers. */
	if ((msr_index >= MSR_P6_EVNTSEL0) &&
	    (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) {
		*index = msr_index - MSR_P6_EVNTSEL0;
		*type = MSR_TYPE_ARCH_CTRL;
		return true;
	}

	/*
	 * Architectural counters, accepting both the legacy PERFCTRx
	 * addresses and their full-width PMCx aliases (0x4c1 offset).
	 */
	pmc_index = msr_index & MSR_PMC_ALIAS_MASK;
	if ((pmc_index >= MSR_IA32_PERFCTR0) &&
	    (pmc_index < MSR_IA32_PERFCTR0 + intel_num_arch_counters)) {
		*type = MSR_TYPE_ARCH_COUNTER;
		*index = pmc_index - MSR_IA32_PERFCTR0;
		return true;
	}

	return false;
}
169+
21170
/* perf callbacks */
22171
static int xen_is_in_guest(void)
23172
{
@@ -141,8 +290,10 @@ void xen_pmu_init(int cpu)
141290

142291
per_cpu(xenpmu_shared, cpu) = xenpmu_data;
143292

144-
if (cpu == 0)
293+
if (cpu == 0) {
145294
perf_register_guest_info_callbacks(&xen_guest_cbs);
295+
xen_pmu_arch_init();
296+
}
146297

147298
return;
148299

0 commit comments

Comments
 (0)