Skip to content

Commit 4cbfd3d

Browse files
committed
workqueue: Call wq_update_unbound_numa() on all CPUs in NUMA node on CPU hotplug
When a CPU went online or offline, wq_update_unbound_numa() was called only on the CPU which was going up or down. This works fine because all CPUs on the same NUMA node share the same pool_workqueue slot - one CPU updating it updates it for everyone in the node.

However, future changes will make each CPU use a separate pool_workqueue even when they're sharing the same worker_pool, which requires updating pool_workqueue's for all CPUs which may be sharing the same pool_workqueue on hotplug.

To accommodate the planned changes, this patch updates workqueue_on/offline_cpu() so that they call wq_update_unbound_numa() for all CPUs sharing the same NUMA node as the CPU going up or down. In the current code, the second+ calls would be noops and there shouldn't be any behavior changes.

* As wq_update_unbound_numa() is now called on multiple CPUs per each hotplug event, @cpu is renamed to @hotplug_cpu and another @cpu argument is added. The former indicates the CPU being hot[un]plugged and the latter the CPU whose pool_workqueue is being updated.

* In wq_update_unbound_numa(), cpu_off is renamed to off_cpu for consistency with the new @hotplug_cpu.

Signed-off-by: Tejun Heo <[email protected]>
1 parent 687a9aa commit 4cbfd3d

File tree

1 file changed

+25
-9
lines changed

1 file changed

+25
-9
lines changed

kernel/workqueue.c

Lines changed: 25 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -4422,7 +4422,8 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
44224422
/**
44234423
* wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
44244424
* @wq: the target workqueue
4425-
* @cpu: the CPU coming up or going down
4425+
* @cpu: the CPU to update pool association for
4426+
* @hotplug_cpu: the CPU coming up or going down
44264427
* @online: whether @cpu is coming up or going down
44274428
*
44284429
* This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
@@ -4442,10 +4443,10 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
44424443
* CPU_DOWN_PREPARE.
44434444
*/
44444445
static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4445-
bool online)
4446+
int hotplug_cpu, bool online)
44464447
{
44474448
int node = cpu_to_node(cpu);
4448-
int cpu_off = online ? -1 : cpu;
4449+
int off_cpu = online ? -1 : hotplug_cpu;
44494450
struct pool_workqueue *old_pwq = NULL, *pwq;
44504451
struct workqueue_attrs *target_attrs;
44514452
cpumask_t *cpumask;
@@ -4473,7 +4474,7 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
44734474
* and create a new one if they don't match. If the target cpumask
44744475
* equals the default pwq's, the default pwq should be used.
44754476
*/
4476-
if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4477+
if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, off_cpu, cpumask)) {
44774478
if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
44784479
return;
44794480
} else {
@@ -5514,8 +5515,15 @@ int workqueue_online_cpu(unsigned int cpu)
55145515
}
55155516

55165517
/* update NUMA affinity of unbound workqueues */
5517-
list_for_each_entry(wq, &workqueues, list)
5518-
wq_update_unbound_numa(wq, cpu, true);
5518+
list_for_each_entry(wq, &workqueues, list) {
5519+
int tcpu;
5520+
5521+
for_each_possible_cpu(tcpu) {
5522+
if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
5523+
wq_update_unbound_numa(wq, tcpu, cpu, true);
5524+
}
5525+
}
5526+
}
55195527

55205528
mutex_unlock(&wq_pool_mutex);
55215529
return 0;
@@ -5533,8 +5541,15 @@ int workqueue_offline_cpu(unsigned int cpu)
55335541

55345542
/* update NUMA affinity of unbound workqueues */
55355543
mutex_lock(&wq_pool_mutex);
5536-
list_for_each_entry(wq, &workqueues, list)
5537-
wq_update_unbound_numa(wq, cpu, false);
5544+
list_for_each_entry(wq, &workqueues, list) {
5545+
int tcpu;
5546+
5547+
for_each_possible_cpu(tcpu) {
5548+
if (cpu_to_node(tcpu) == cpu_to_node(cpu)) {
5549+
wq_update_unbound_numa(wq, tcpu, cpu, false);
5550+
}
5551+
}
5552+
}
55385553
mutex_unlock(&wq_pool_mutex);
55395554

55405555
return 0;
@@ -6509,7 +6524,8 @@ void __init workqueue_init(void)
65096524
}
65106525

65116526
list_for_each_entry(wq, &workqueues, list) {
6512-
wq_update_unbound_numa(wq, smp_processor_id(), true);
6527+
wq_update_unbound_numa(wq, smp_processor_id(), smp_processor_id(),
6528+
true);
65136529
WARN(init_rescuer(wq),
65146530
"workqueue: failed to create early rescuer for %s",
65156531
wq->name);

0 commit comments

Comments
 (0)