Skip to content

Commit 451d24d

Browse files
Peter Zijlstra authored and Ingo Molnar committed
perf/core: Fix crash in perf_event_read()
Alexei had his box explode because doing read() on a package (rapl/uncore) event that isn't currently scheduled in ends up doing an out-of-bounds load. Rework the code to more explicitly deal with event->oncpu being -1. Reported-by: Alexei Starovoitov <[email protected]> Tested-by: Alexei Starovoitov <[email protected]> Tested-by: David Carrillo-Cisneros <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Fixes: d6a2f90 ("perf/core: Introduce PMU_EV_CAP_READ_ACTIVE_PKG") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 53e74a1 commit 451d24d

File tree

1 file changed

+15
-10
lines changed

1 file changed

+15
-10
lines changed

kernel/events/core.c

Lines changed: 15 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -3487,14 +3487,15 @@ struct perf_read_data {
34873487
int ret;
34883488
};
34893489

3490-
static int find_cpu_to_read(struct perf_event *event, int local_cpu)
3490+
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
34913491
{
3492-
int event_cpu = event->oncpu;
34933492
u16 local_pkg, event_pkg;
34943493

34953494
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
3496-
event_pkg = topology_physical_package_id(event_cpu);
3497-
local_pkg = topology_physical_package_id(local_cpu);
3495+
int local_cpu = smp_processor_id();
3496+
3497+
event_pkg = topology_physical_package_id(event_cpu);
3498+
local_pkg = topology_physical_package_id(local_cpu);
34983499

34993500
if (event_pkg == local_pkg)
35003501
return local_cpu;
@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
36243625

36253626
static int perf_event_read(struct perf_event *event, bool group)
36263627
{
3627-
int ret = 0, cpu_to_read, local_cpu;
3628+
int event_cpu, ret = 0;
36283629

36293630
/*
36303631
* If event is enabled and currently active on a CPU, update the
@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
36373638
.ret = 0,
36383639
};
36393640

3640-
local_cpu = get_cpu();
3641-
cpu_to_read = find_cpu_to_read(event, local_cpu);
3642-
put_cpu();
3641+
event_cpu = READ_ONCE(event->oncpu);
3642+
if ((unsigned)event_cpu >= nr_cpu_ids)
3643+
return 0;
3644+
3645+
preempt_disable();
3646+
event_cpu = __perf_event_read_cpu(event, event_cpu);
36433647

36443648
/*
36453649
* Purposely ignore the smp_call_function_single() return
36463650
* value.
36473651
*
3648-
* If event->oncpu isn't a valid CPU it means the event got
3652+
* If event_cpu isn't a valid CPU it means the event got
36493653
* scheduled out and that will have updated the event count.
36503654
*
36513655
* Therefore, either way, we'll have an up-to-date event count
36523656
* after this.
36533657
*/
3654-
(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
3658+
(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
3659+
preempt_enable();
36553660
ret = data.ret;
36563661
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
36573662
struct perf_event_context *ctx = event->ctx;

0 commit comments

Comments
 (0)