
Commit 3c33010

Merge branch 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf
* 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf: (31 commits)
  perf_counter tools: Give perf top inherit option
  perf_counter tools: Fix vmlinux symbol generation breakage
  perf_counter: Detect debugfs location
  perf_counter: Add tracepoint support to perf list, perf stat
  perf symbol: C++ demangling
  perf: avoid structure size confusion by using a fixed size
  perf_counter: Fix throttle/unthrottle event logging
  perf_counter: Improve perf stat and perf record option parsing
  perf_counter: PERF_SAMPLE_ID and inherited counters
  perf_counter: Plug more stack leaks
  perf: Fix stack data leak
  perf_counter: Remove unused variables
  perf_counter: Make call graph option consistent
  perf_counter: Add perf record option to log addresses
  perf_counter: Log vfork as a fork event
  perf_counter: Synthesize VDSO mmap event
  perf_counter: Make sure we dont leak kernel memory to userspace
  perf_counter tools: Fix index boundary check
  perf_counter: Fix the tracepoint channel to perfcounters
  perf_counter, x86: Extend perf_counter Pentium M support
  ...
2 parents 612e900 + 0fdc7e6 commit 3c33010

25 files changed: +948, -299 lines

arch/x86/kernel/cpu/perf_counter.c

Lines changed: 233 additions & 20 deletions
@@ -65,6 +65,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 	.enabled = 1,
 };
 
+/*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
 /*
  * Intel PerfMon v3. Used on Core2 and later.
  */
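
[Aside, not part of the patch] p6_pmu_raw_event() above only passes through the documented EVNTSEL fields: event select, unit mask, edge detect, invert and counter mask. Anything outside P6_EVNTSEL_MASK, such as the enable bit, is stripped from a user-supplied raw config. A minimal standalone sketch of that masking, using the mask values from the hunk and a made-up raw input:

#include <stdio.h>
#include <stdint.h>

#define P6_EVNTSEL_EVENT_MASK	0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK	0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK	0x00040000ULL
#define P6_EVNTSEL_INV_MASK	0x00800000ULL
#define P6_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_COUNTER_MASK)

int main(void)
{
	/* made-up raw config: event 0x2e, umask 0x0f, plus USR/OS and the
	 * enable bit (0x430000), all of which lie outside the allowed mask */
	uint64_t raw = 0x430f2eULL;

	printf("raw       = %#llx\n", (unsigned long long)raw);
	printf("sanitized = %#llx\n",
	       (unsigned long long)(raw & P6_EVNTSEL_MASK));
	return 0;
}

Compiled with any C compiler this prints a sanitized value of 0xf2e: the USR/OS and enable bits of the made-up input are dropped, leaving only the event and unit-mask fields.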
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
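
[Aside, not part of the patch] The __hw_perf_counter_init() change above gives the 0x0000 entries in p6_perfmon_event_map a meaning: a generic event that maps to 0 has no counterpart on this PMU and now fails with -ENOENT, and a mapping of -1 is rejected with -EINVAL, instead of silently programming event 0. A standalone sketch of that lookup-and-validate step (the reduced enum and the map_event() helper are made up for illustration):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

enum { CPU_CYCLES, INSTRUCTIONS, CACHE_REFERENCES, CACHE_MISSES, NR_EVENTS };

static const uint64_t p6_map[NR_EVENTS] = {
	[CPU_CYCLES]		= 0x0079,
	[INSTRUCTIONS]		= 0x00c0,
	[CACHE_REFERENCES]	= 0x0000,	/* no P6 event for this */
	[CACHE_MISSES]		= 0x0000,	/* no P6 event for this */
};

/* mirrors the checks added above: 0 -> -ENOENT, -1 -> -EINVAL */
static int map_event(int event, uint64_t *config)
{
	uint64_t c = p6_map[event];

	if (c == 0)
		return -ENOENT;
	if (c == (uint64_t)-1)
		return -EINVAL;
	*config = c;
	return 0;
}

int main(void)
{
	uint64_t config = 0;

	printf("cycles     -> %d (config %#llx)\n",
	       map_event(CPU_CYCLES, &config), (unsigned long long)config);
	printf("cache refs -> %d\n", map_event(CACHE_REFERENCES, &config));
	return 0;
}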
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,30 +909,38 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
 intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
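
[Aside, not part of the patch] p6_pmu_disable_counter() above never touches the enable bit: as the "p6 only has one enable register" comments note, the EN bit in EVNTSEL0 gates both P6 counters, so an individual counter is parked by reprogramming it to the do-nothing P6_NOP_COUNTER event instead. A standalone sketch of the value that ends up in the event-select register (constants mirror the patch; ARCH_PERFMON_EVENTSEL0_ENABLE is the EN bit, bit 22):

#include <stdio.h>
#include <stdint.h>

#define P6_NOP_COUNTER			0x0000002EULL	/* L2_RQSTS, empty MESI umask */
#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)	/* EN bit in EVNTSEL0 */

/* value written to the counter's event-select register to park it */
static uint64_t p6_disable_value(int globally_enabled)
{
	uint64_t val = P6_NOP_COUNTER;

	/* keep EN set so the other counter keeps running */
	if (globally_enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	return val;
}

int main(void)
{
	printf("PMU enabled:  write %#llx\n",
	       (unsigned long long)p6_disable_value(1));
	printf("PMU disabled: write %#llx\n",
	       (unsigned long long)p6_disable_value(0));
	return 0;
}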
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled		= 1;
+		data.period	= counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
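
[Aside, not part of the patch] The overflow test in p6_pmu_handle_irq() relies on the usual perf arming convention (assumed here): a sampling counter starts at minus the period within its effective width, so while the top effective bit (bit 31 for the 32-bit-wide P6 counters defined later in this patch) is still set, the counter has not wrapped and the handler skips it. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define COUNTER_BITS	32			/* .counter_bits in the p6_pmu definition below */
#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

/* same test as the handler: top effective bit still set => not wrapped yet */
static int overflowed(uint64_t val)
{
	return !(val & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t period = 100000;
	uint64_t armed  = (0 - period) & COUNTER_MASK;	/* counter starts at -period */

	printf("armed at %#llx: overflowed=%d\n",
	       (unsigned long long)armed, overflowed(armed));
	printf("after counting %llu events: overflowed=%d\n",
	       (unsigned long long)period,
	       overflowed((armed + period) & COUNTER_MASK));
	return 0;
}

Only once that bit reads back clear does the handler log the sample, re-arm the period and, if the overflow handler asks for throttling, park the counter via p6_pmu_disable_counter().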
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
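
[Aside, not part of the patch] The width comment inside the p6_pmu definition above drives its last two fields: because bits 32-39 of a P6 counter mirror bit 31, any value at or above 2^31 reads back sign-extended, so the driver treats the counter as 32 bits wide, masks reads with (1ULL << 32) - 1 and caps the sampling period at 2^31 - 1. A standalone sketch that models the mirroring to show the effect (p6_hw_readback() is illustrative only, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* model of the readback: bits 32-39 mirror bit 31 of the 32-bit value */
static uint64_t p6_hw_readback(uint64_t v32)
{
	uint64_t v = v32 & ((1ULL << 32) - 1);

	if (v & (1ULL << 31))
		v |= 0xffULL << 32;
	return v & ((1ULL << 40) - 1);
}

int main(void)
{
	printf("0x7fffffff reads back as %#llx\n",
	       (unsigned long long)p6_hw_readback(0x7fffffffULL));
	printf("0x80000000 reads back as %#llx\n",
	       (unsigned long long)p6_hw_readback(0x80000000ULL));
	/*
	 * Hence .max_period = (1ULL << 31) - 1 keeps the programmed value
	 * below the mirrored range, and .counter_mask = (1ULL << 32) - 1
	 * strips the mirrored bits on the software side.
	 */
	return 0;
}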
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports