
Commit 0e0162d

kliang2 authored and Ingo Molnar committed
perf/x86/intel/uncore: Add infrastructure for free running counters
There are a number of free running counters introduced for uncore, which provide highly valuable information to a wide array of customers. However, the generic uncore code doesn't support them yet.

The free running counters will be specially handled based on their unique attributes:

 - They are read-only; they cannot be enabled or disabled.
 - The event and the counter are always 1:1 mapped; the counter doesn't need to be assigned or tracked in event_list.
 - They are always active; there is no need to check availability.
 - They have different bit widths.

Also, use inline helpers to replace the checks for the fixed counter and the free running counter.

Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 927b2de commit 0e0162d


arch/x86/events/intel/uncore.c

Lines changed: 64 additions & 4 deletions
@@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
 	hwc->idx = idx;
 	hwc->last_tag = ++box->tags[idx];
 
-	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+	if (uncore_pmc_fixed(hwc->idx)) {
 		hwc->event_base = uncore_fixed_ctr(box);
 		hwc->config_base = uncore_fixed_ctl(box);
 		return;
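The predicates used in this hunk and the next one, uncore_pmc_fixed() and uncore_pmc_freerunning(), are the inline helpers the commit message refers to. They are not part of this file's diff; they presumably live in arch/x86/events/intel/uncore.h. A minimal sketch of what they likely look like (an assumption, not taken from this diff):

	/* Sketch: assumed helper definitions from uncore.h, not part of this hunk. */
	static inline bool uncore_pmc_fixed(int idx)
	{
		return idx == UNCORE_PMC_IDX_FIXED;
	}

	static inline bool uncore_pmc_freerunning(int idx)
	{
		return idx == UNCORE_PMC_IDX_FREERUNNING;
	}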
@@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
 	u64 prev_count, new_count, delta;
 	int shift;
 
-	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
+	if (uncore_pmc_freerunning(event->hw.idx))
+		shift = 64 - uncore_freerunning_bits(box, event);
+	else if (uncore_pmc_fixed(event->hw.idx))
 		shift = 64 - uncore_fixed_ctr_bits(box);
 	else
 		shift = 64 - uncore_perf_ctr_bits(box);
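The shift selected above accounts for the differing counter widths in the unchanged remainder of uncore_perf_event_update(): shifting both readings left and the delta back right discards bits above the counter width, so a wrapped counter still produces a correct positive delta. Roughly (reconstructed for context; only the shift selection is new in this hunk):

	/* Context sketch: existing body of uncore_perf_event_update(). */
	again:
		prev_count = local64_read(&event->hw.prev_count);
		new_count = uncore_read_counter(box, event);
		if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
			goto again;

		/* Truncate both values to the counter width before subtracting. */
		delta = (new_count << shift) - (prev_count << shift);
		delta >>= shift;

		local64_add(delta, &event->count);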
@@ -454,10 +456,25 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	int idx = event->hw.idx;
 
-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
 		return;
 
-	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+	/*
+	 * Free running counter is read-only and always active.
+	 * Use the current counter value as start point.
+	 * There is no overflow interrupt for free running counter.
+	 * Use hrtimer to periodically poll the counter to avoid overflow.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx)) {
+		list_add_tail(&event->active_entry, &box->active_list);
+		local64_set(&event->hw.prev_count,
+			    uncore_read_counter(box, event));
+		if (box->n_active++ == 0)
+			uncore_pmu_start_hrtimer(box);
+		return;
+	}
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 		return;
 
 	event->hw.state = 0;
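Adding the event to box->active_list is what makes the hrtimer polling work: with no overflow interrupt for free running counters, the existing uncore hrtimer callback periodically folds the current readings into every active event before the counter can wrap. A rough sketch of that callback (it is not changed by this patch; details elided):

	static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
	{
		struct intel_uncore_box *box;
		struct perf_event *event;

		box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
		/* ... bail out / quiesce details elided ... */

		/* Fold in every active event, including free running ones. */
		list_for_each_entry(event, &box->active_list, active_entry)
			uncore_perf_event_update(box, event);

		hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
		return HRTIMER_RESTART;
	}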
@@ -479,6 +496,15 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	struct hw_perf_event *hwc = &event->hw;
 
+	/* Cannot disable free running counter which is read-only */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		list_del(&event->active_entry);
+		if (--box->n_active == 0)
+			uncore_pmu_cancel_hrtimer(box);
+		uncore_perf_event_update(box, event);
+		return;
+	}
+
 	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
 		uncore_disable_event(box, event);
 		box->n_active--;
@@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
 	if (!box)
 		return -ENODEV;
 
+	/*
+	 * The free running counter is assigned in event_init().
+	 * The free running counter event and free running counter
+	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
+	 */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		if (flags & PERF_EF_START)
+			uncore_pmu_event_start(event, 0);
+		return 0;
+	}
+
 	ret = n = uncore_collect_events(box, event, false);
 	if (ret < 0)
 		return ret;
@@ -570,6 +607,14 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
 
 	uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 
+	/*
+	 * The event for a free running counter is not tracked by event_list.
+	 * Because the event and the free running counter are 1:1 mapped,
+	 * there is no need to force event->hw.idx = -1 to reassign the counter.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return;
+
 	for (i = 0; i < box->n_events; i++) {
 		if (event == box->event_list[i]) {
 			uncore_put_event_constraint(box, event);
@@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 	struct intel_uncore_box *fake_box;
 	int ret = -EINVAL, n;
 
+	/* The free running counter is always active. */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return 0;
+
 	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 	if (!fake_box)
 		return -ENOMEM;
@@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event)
 
 		/* fixed counters have event field hardcoded to zero */
 		hwc->config = 0ULL;
+	} else if (is_freerunning_event(event)) {
+		if (!check_valid_freerunning_event(box, event))
+			return -EINVAL;
+		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
+		/*
+		 * The free running counter event and free running counter
+		 * are always 1:1 mapped.
+		 * The free running counter is always active.
+		 * Assign the free running counter here.
+		 */
+		event->hw.event_base = uncore_freerunning_counter(box, event);
 	} else {
 		hwc->config = event->attr.config &
 			  (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
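The helpers used in this hunk (is_freerunning_event(), check_valid_freerunning_event(), uncore_freerunning_counter()) are not defined in uncore.c; they come from the companion uncore.h changes. As an illustration of the encoding, a free running event is expressed as a fixed-style event code whose umask selects the free running counter type and index. A hedged sketch of the detection helper (assumed layout, not part of this file's diff):

	/* Sketch: assumed helper from uncore.h. Free running events reuse the
	 * fixed event code; umask values at or above
	 * UNCORE_FREERUNNING_UMASK_START encode the counter type and index. */
	static inline bool is_freerunning_event(struct perf_event *event)
	{
		u64 cfg = event->attr.config;

		return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
		       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
	}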
