Skip to content

Commit 2da3314

Browse files
kliang2 and Ingo Molnar
authored and committed
perf/x86/intel/uncore: Introduce customized event_read() for client IMC uncore
There are two free-running counters for client IMC uncore. The customized event_init() function hard codes their index to 'UNCORE_PMC_IDX_FIXED' and 'UNCORE_PMC_IDX_FIXED + 1'. To support the index 'UNCORE_PMC_IDX_FIXED + 1', the generic uncore_perf_event_update is obscurely hacked. The code quality issue will bring problems when a new counter index is introduced into the generic code, for example, a new index for free-running counter. Introducing a customized event_read() function for client IMC uncore. The customized function is copied from previous generic uncore_pmu_event_read(). The index 'UNCORE_PMC_IDX_FIXED + 1' will be isolated for client IMC uncore only. Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Thomas Gleixner <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: [email protected] Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent c52b5c5 commit 2da3314

File tree

1 file changed

+31
-2
lines changed

1 file changed

+31
-2
lines changed

arch/x86/events/intel/uncore_snb.c

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -450,6 +450,35 @@ static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
450450
uncore_pmu_start_hrtimer(box);
451451
}
452452

453+
static void snb_uncore_imc_event_read(struct perf_event *event)
454+
{
455+
struct intel_uncore_box *box = uncore_event_to_box(event);
456+
u64 prev_count, new_count, delta;
457+
int shift;
458+
459+
/*
460+
* There are two free running counters in IMC.
461+
* The index for the second one is hardcoded to
462+
* UNCORE_PMC_IDX_FIXED + 1.
463+
*/
464+
if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
465+
shift = 64 - uncore_fixed_ctr_bits(box);
466+
else
467+
shift = 64 - uncore_perf_ctr_bits(box);
468+
469+
/* the hrtimer might modify the previous event value */
470+
again:
471+
prev_count = local64_read(&event->hw.prev_count);
472+
new_count = uncore_read_counter(box, event);
473+
if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
474+
goto again;
475+
476+
delta = (new_count << shift) - (prev_count << shift);
477+
delta >>= shift;
478+
479+
local64_add(delta, &event->count);
480+
}
481+
453482
static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
454483
{
455484
struct intel_uncore_box *box = uncore_event_to_box(event);
@@ -472,7 +501,7 @@ static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
472501
* Drain the remaining delta count out of an event
473502
* that we are disabling:
474503
*/
475-
uncore_perf_event_update(box, event);
504+
snb_uncore_imc_event_read(event);
476505
hwc->state |= PERF_HES_UPTODATE;
477506
}
478507
}
@@ -534,7 +563,7 @@ static struct pmu snb_uncore_imc_pmu = {
534563
.del = snb_uncore_imc_event_del,
535564
.start = snb_uncore_imc_event_start,
536565
.stop = snb_uncore_imc_event_stop,
537-
.read = uncore_pmu_event_read,
566+
.read = snb_uncore_imc_event_read,
538567
};
539568

540569
static struct intel_uncore_ops snb_uncore_imc_ops = {

0 commit comments

Comments
 (0)