@@ -2736,28 +2736,18 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu __maybe_unused = get_cpu();
 
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_SMP
-	rq = task_rq_lock(p, &flags);
-	p->state = TASK_WAKING;
-
 	/*
 	 * Fork balancing, do it here and not earlier because:
 	 *  - cpus_allowed can change in the fork path
 	 *  - any previously selected cpu might disappear through hotplug
-	 *
-	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
-	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-	set_task_cpu(p, cpu);
-
-	p->state = TASK_RUNNING;
-	task_rq_unlock(rq, p, &flags);
+	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
 #endif
 
-	rq = task_rq_lock(p, &flags);
+	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
@@ -2767,7 +2757,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, p, &flags);
-	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
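Applied, the two hunks leave wake_up_new_task() reading roughly as sketched below. This is a reconstruction from the hunks alone: the code between them is elided, and the locking comments are annotations added here, not part of the patch.

void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	/* p->pi_lock now covers the whole fork wakeup, CPU selection included */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_SMP
	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 */
	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
#endif

	/* pi_lock is already held, so only rq->lock needs taking here */
	rq = __task_rq_lock(p);
	activate_task(rq, p, 0);
	p->on_rq = 1;
	trace_sched_wakeup_new(p, true);

	/* ... code between the two hunks elided; it ends with the
	 *     sched_class->task_woken() callback from the second hunk ... */

	task_rq_unlock(rq, p, &flags);	/* releases rq->lock, then p->pi_lock */
}

The pairing matters here: the patch adds raw_spin_lock_irqsave(&p->pi_lock, flags) with no explicit unlock of its own, so the three-argument task_rq_unlock(rq, p, &flags) must release both rq->lock and p->pi_lock, and __task_rq_lock() must be the variant that expects pi_lock to be held already. As an assumption about kernels of this vintage rather than something shown in the diff, __task_rq_lock() looks roughly like this: it takes only rq->lock, retrying if the task migrates between reading task_rq(p) and acquiring the lock.

static struct rq *__task_rq_lock(struct task_struct *p)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);	/* caller holds pi_lock, irqs off */

	for (;;) {
		rq = task_rq(p);		/* the rq the task currently sits on */
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))	/* still the same rq after locking? */
			return rq;
		/* the task migrated under us; drop the stale lock and retry */
		raw_spin_unlock(&rq->lock);
	}
}

Holding pi_lock across select_task_rq() is what replaces the old TASK_WAKING trick: ->cpus_allowed can no longer change under the CPU selection, so there is no need to park the task in TASK_WAKING and juggle rq->lock around it.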