@@ -8726,7 +8726,13 @@ __perf_event_exit_task(struct perf_event *child_event,
 	 * Do destroy all inherited groups, we don't care about those
 	 * and being thorough is better.
 	 */
-	perf_remove_from_context(child_event, !!child_event->parent);
+	raw_spin_lock_irq(&child_ctx->lock);
+	WARN_ON_ONCE(child_ctx->is_active);
+
+	if (!!child_event->parent)
+		perf_group_detach(child_event);
+	list_del_event(child_event, child_ctx);
+	raw_spin_unlock_irq(&child_ctx->lock);
 
 	/*
 	 * It can happen that the parent exits first, and has events
@@ -8746,17 +8752,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
 	struct perf_event *child_event, *next;
 	struct perf_event_context *child_ctx, *clone_ctx = NULL;
-	unsigned long flags;
 
 	if (likely(!child->perf_event_ctxp[ctxn]))
 		return;
 
-	local_irq_save(flags);
+	local_irq_disable();
+	WARN_ON_ONCE(child != current);
 	/*
 	 * We can't reschedule here because interrupts are disabled,
-	 * and either child is current or it is a task that can't be
-	 * scheduled, so we are now safe from rescheduling changing
-	 * our context.
+	 * and child must be current.
 	 */
 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
 
@@ -8776,7 +8780,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 */
 	clone_ctx = unclone_ctx(child_ctx);
 	update_context_time(child_ctx);
-	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+	raw_spin_unlock_irq(&child_ctx->lock);
 
 	if (clone_ctx)
 		put_ctx(clone_ctx);
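
For reference, the removal path in __perf_event_exit_task() as it reads once this patch is applied. This is a sketch assembled from the first hunk above; surrounding code is elided and the annotation comments are mine, not part of the commit.

	/*
	 * Do destroy all inherited groups, we don't care about those
	 * and being thorough is better.
	 */
	raw_spin_lock_irq(&child_ctx->lock);
	WARN_ON_ONCE(child_ctx->is_active);	/* ctx must already be inactive here */

	if (!!child_event->parent)		/* only inherited events have a parent */
		perf_group_detach(child_event);
	list_del_event(child_event, child_ctx);
	raw_spin_unlock_irq(&child_ctx->lock);

The local_irq_save()/raw_spin_unlock_irqrestore() pair in perf_event_exit_task_context() becomes local_irq_disable()/raw_spin_unlock_irq(), which is presumably safe because the new WARN_ON_ONCE(child != current) guarantees the function runs in the exiting task itself, where interrupts are known to be enabled on entry, so the saved flags word is no longer needed.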