Commit 27a96c4
Merge tag 'perf_urgent_for_v5.17_rc2_p2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Prevent accesses to the per-CPU cgroup context list from another CPU
   except the one it belongs to, to avoid list corruption

 - Make sure parent events are always woken up to avoid indefinite hangs
   in the traced workload

* tag 'perf_urgent_for_v5.17_rc2_p2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix cgroup event list management
  perf: Always wake the parent event

2 parents 24f4db1 + c5de60c
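The first fix hinges on cgrp_cpuctx_list being a per-CPU list: only the CPU that owns the list may modify it, so the removal and install fast paths that would touch it from an arbitrary CPU are disabled for cgroup events and the IPI path is taken instead. As a loose userspace analogy (not kernel code; every identifier below is invented for illustration), the sketch gives one "owner" thread sole authority over its list, and a second thread hands it a removal request through a mailbox, playing the role of the IPI, instead of unlinking the node itself:

	/*
	 * Userspace analogy, not kernel code: all names invented.  The
	 * owner thread is the only one allowed to edit its list (the
	 * role of the CPU owning cgrp_cpuctx_list); the remote thread
	 * posts a request (the role of the IPI) instead of unlinking
	 * the node itself.  Compile with: cc -pthread analogy.c
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct node { struct node *next; int val; };

	static struct node n2 = { NULL, 2 };
	static struct node n1 = { &n2, 1 };
	static struct node *owner_list = &n1;  /* "per-CPU" list, owner-only */

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static struct node *request;           /* the "IPI" payload */
	static int done;

	static void owner_remove(struct node *victim)  /* owner thread only */
	{
		struct node **pp = &owner_list;

		while (*pp && *pp != victim)
			pp = &(*pp)->next;
		if (*pp)
			*pp = victim->next;
	}

	static void *owner(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		while (!done) {
			while (!request && !done)
				pthread_cond_wait(&cond, &lock);
			if (request) {
				owner_remove(request);
				request = NULL;
				pthread_cond_broadcast(&cond);
			}
		}
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, owner, NULL);

		/* "Remote CPU": post a request, never touch owner_list. */
		pthread_mutex_lock(&lock);
		request = &n1;
		pthread_cond_broadcast(&cond);
		while (request)
			pthread_cond_wait(&cond, &lock);
		done = 1;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);

		for (struct node *n = owner_list; n; n = n->next)
			printf("%d\n", n->val);  /* prints: 2 */
		return 0;
	}

The point is the ownership discipline, not the mechanism: in the kernel the IPI plays the mailbox's role and runs __perf_remove_from_context() on the owning CPU.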

File tree

1 file changed (+19, -4 lines)


kernel/events/core.c

Lines changed: 19 additions & 4 deletions
@@ -2462,7 +2462,11 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 	 * event_function_call() user.
 	 */
 	raw_spin_lock_irq(&ctx->lock);
-	if (!ctx->is_active) {
+	/*
+	 * Cgroup events are per-cpu events, and must IPI because of
+	 * cgrp_cpuctx_list.
+	 */
+	if (!ctx->is_active && !is_cgroup_event(event)) {
 		__perf_remove_from_context(event, __get_cpu_context(ctx),
 					   ctx, (void *)flags);
 		raw_spin_unlock_irq(&ctx->lock);
@@ -2895,11 +2899,14 @@ perf_install_in_context(struct perf_event_context *ctx,
 	 * perf_event_attr::disabled events will not run and can be initialized
 	 * without IPI. Except when this is the first event for the context, in
 	 * that case we need the magic of the IPI to set ctx->is_active.
+	 * Similarly, cgroup events for the context also needs the IPI to
+	 * manipulate the cgrp_cpuctx_list.
 	 *
 	 * The IOC_ENABLE that is sure to follow the creation of a disabled
 	 * event will issue the IPI and reprogram the hardware.
 	 */
-	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
+	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
+	    ctx->nr_events && !is_cgroup_event(event)) {
 		raw_spin_lock_irq(&ctx->lock);
 		if (ctx->task == TASK_TOMBSTONE) {
 			raw_spin_unlock_irq(&ctx->lock);
@@ -5985,6 +5992,8 @@ static void ring_buffer_attach(struct perf_event *event,
 	struct perf_buffer *old_rb = NULL;
 	unsigned long flags;
 
+	WARN_ON_ONCE(event->parent);
+
 	if (event->rb) {
 		/*
 		 * Should be impossible, we set this when removing
@@ -6042,6 +6051,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
 {
 	struct perf_buffer *rb;
 
+	if (event->parent)
+		event = event->parent;
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
@@ -6055,6 +6067,9 @@ struct perf_buffer *ring_buffer_get(struct perf_event *event)
 {
 	struct perf_buffer *rb;
 
+	if (event->parent)
+		event = event->parent;
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
@@ -6763,7 +6778,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
 	if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
 		goto out;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		goto out;
 
@@ -6829,7 +6844,7 @@ static void perf_aux_sample_output(struct perf_event *event,
 	if (WARN_ON_ONCE(!sampler || !data->aux_size))
 		return;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		return;
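The second fix is visible in the ring_buffer_get()/ring_buffer_wakeup() hunks: inherited child events share the parent event's ring buffer, so both helpers now redirect to the parent once, and the two callers that open-coded sampler->parent ? sampler->parent : sampler are simplified. A minimal standalone sketch of the redirection pattern (invented types and names, not kernel code):

	#include <stdio.h>

	struct buffer { int poll_waiters; };

	struct event {
		struct event  *parent;  /* NULL for the original (parent) event */
		struct buffer *rb;      /* only set on the parent */
	};

	/* Mirror of the fixed helpers: redirect to the parent exactly once. */
	static struct buffer *event_rb(struct event *event)
	{
		if (event->parent)
			event = event->parent;
		return event->rb;
	}

	int main(void)
	{
		struct buffer rb = { 0 };
		struct event parent = { NULL, &rb };
		struct event child = { &parent, NULL };

		/* Parent and child resolve to the same buffer, so a wakeup
		 * issued on the child cannot get lost on child->rb == NULL. */
		printf("%d\n", event_rb(&parent) == event_rb(&child)); /* 1 */
		return 0;
	}

Centralizing the redirect means a wakeup issued on a child can no longer miss the buffer that only the parent owns, which matches the "indefinite hangs in the traced workload" symptom the commit message describes.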