
Commit 025e168

workqueue: Modularize wq_pod_type initialization
While wq_pod_type[] can now group CPUs in any arbitrary way, WQ_AFFN_NUMA init is hard coded into workqueue_init_topology(). This patch modularizes the init path by introducing init_pod_type(), which takes as an argument a callback that determines whether two CPUs should share a pod.

init_pod_type() first scans the CPU combinations testing for sharing to assign consecutive pod IDs and initialize pod_type->cpu_pod[]. Once ->cpu_pod[] is determined, ->pod_cpus[] and ->pod_node[] are initialized accordingly.

WQ_AFFN_NUMA is now initialized by calling init_pod_type() with cpus_share_numa(), which tests whether two CPUs belong to the same NUMA node.

This patch may change the pod ID assigned to each NUMA node, but that shouldn't cause any behavior changes as the NUMA node to use for allocations is tracked separately in pod_type->pod_node[]. This makes adding new affinity types pretty easy.

Signed-off-by: Tejun Heo <[email protected]>
1 parent 7f7dc37 commit 025e168
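
To illustrate the pod ID assignment described in the commit message, here is a minimal userspace sketch (not kernel code) of the same scan: for each CPU, earlier CPUs are checked in order, and the first one that shares a pod donates its ID; otherwise a new consecutive ID is allocated. The 6-CPU cpu_node[] table and the cpus_share_numa() stand-in are illustrative assumptions, not part of the patch.

/* Userspace sketch of init_pod_type()'s consecutive pod ID assignment.
 * Assumes a made-up 6-CPU system: CPUs 0-1 and 4-5 on node 0, CPUs 2-3
 * on node 1, purely for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_TEST_CPUS 6

static const int cpu_node[NR_TEST_CPUS] = { 0, 0, 1, 1, 0, 0 };

static bool cpus_share_numa(int cpu0, int cpu1)
{
	return cpu_node[cpu0] == cpu_node[cpu1];
}

int main(void)
{
	int cpu_pod[NR_TEST_CPUS];
	int nr_pods = 0;

	for (int cur = 0; cur < NR_TEST_CPUS; cur++) {
		for (int pre = 0; pre <= cur; pre++) {
			if (pre >= cur) {
				/* no earlier CPU shares a pod: allocate a new consecutive ID */
				cpu_pod[cur] = nr_pods++;
				break;
			}
			if (cpus_share_numa(cur, pre)) {
				/* reuse the pod ID of the first earlier CPU in the same pod */
				cpu_pod[cur] = cpu_pod[pre];
				break;
			}
		}
	}

	for (int cpu = 0; cpu < NR_TEST_CPUS; cpu++)
		printf("cpu%d -> pod %d\n", cpu, cpu_pod[cpu]);
	printf("nr_pods = %d\n", nr_pods);
	return 0;
}

With the table above, CPUs 0, 1, 4 and 5 end up in pod 0 and CPUs 2 and 3 in pod 1, so pod IDs stay dense even when the CPU-to-node layout is irregular.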


kernel/workqueue.c

Lines changed: 50 additions & 34 deletions
@@ -6494,6 +6494,54 @@ void __init workqueue_init(void)
 	wq_watchdog_init();
 }
 
+/*
+ * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
+ * @cpu_shares_pod(). Each subset of CPUs that share a pod is assigned a unique
+ * and consecutive pod ID. The rest of @pt is initialized accordingly.
+ */
+static void __init init_pod_type(struct wq_pod_type *pt,
+				 bool (*cpus_share_pod)(int, int))
+{
+	int cur, pre, cpu, pod;
+
+	pt->nr_pods = 0;
+
+	/* init @pt->cpu_pod[] according to @cpus_share_pod() */
+	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
+	BUG_ON(!pt->cpu_pod);
+
+	for_each_possible_cpu(cur) {
+		for_each_possible_cpu(pre) {
+			if (pre >= cur) {
+				pt->cpu_pod[cur] = pt->nr_pods++;
+				break;
+			}
+			if (cpus_share_pod(cur, pre)) {
+				pt->cpu_pod[cur] = pt->cpu_pod[pre];
+				break;
+			}
+		}
+	}
+
+	/* init the rest to match @pt->cpu_pod[] */
+	pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
+	pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
+	BUG_ON(!pt->pod_cpus || !pt->pod_node);
+
+	for (pod = 0; pod < pt->nr_pods; pod++)
+		BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
+
+	for_each_possible_cpu(cpu) {
+		cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
+		pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
+	}
+}
+
+static bool __init cpus_share_numa(int cpu0, int cpu1)
+{
+	return cpu_to_node(cpu0) == cpu_to_node(cpu1);
+}
+
 /**
  * workqueue_init_topology - initialize CPU pods for unbound workqueues
  *
@@ -6503,45 +6551,13 @@ void __init workqueue_init(void)
  */
 void __init workqueue_init_topology(void)
 {
-	struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
 	struct workqueue_struct *wq;
-	int node, cpu;
-
-	if (num_possible_nodes() <= 1)
-		return;
+	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
-			pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
-			return;
-		}
-	}
+	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
 	mutex_lock(&wq_pool_mutex);
 
-	/*
-	 * We want masks of possible CPUs of each node which isn't readily
-	 * available. Build one from cpu_to_node() which should have been
-	 * fully initialized by now.
-	 */
-	pt->pod_cpus = kcalloc(nr_node_ids, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
-	pt->pod_node = kcalloc(nr_node_ids, sizeof(pt->pod_node[0]), GFP_KERNEL);
-	pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
-	BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
-
-	for_each_node(node)
-		BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[node], GFP_KERNEL,
-						node_online(node) ? node : NUMA_NO_NODE));
-
-	for_each_possible_cpu(cpu) {
-		node = cpu_to_node(cpu);
-		cpumask_set_cpu(cpu, pt->pod_cpus[node]);
-		pt->pod_node[node] = node;
-		pt->cpu_pod[cpu] = node;
-	}
-
-	pt->nr_pods = nr_node_ids;
-
 	/*
 	 * Workqueues allocated earlier would have all CPUs sharing the default
 	 * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
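
As the commit message notes, this structure makes additional affinity scopes cheap to add: define a sharing predicate and call init_pod_type() with it. The fragment below is a hypothetical sketch, not part of this patch; WQ_AFFN_SMT and cpus_share_smt() are illustrative names, and it assumes a corresponding enum value exists in wq_pod_types[].

/* Hypothetical example only: an SMT-sibling affinity scope. */
static bool __init cpus_share_smt(int cpu0, int cpu1)
{
	/* topology_sibling_cpumask() is the kernel's per-CPU SMT sibling mask */
	return cpumask_test_cpu(cpu1, topology_sibling_cpumask(cpu0));
}

void __init workqueue_init_topology(void)
{
	struct workqueue_struct *wq;
	int cpu;

	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
	/* hypothetical extra scope, assuming a WQ_AFFN_SMT index is defined */
	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);

	/* rest unchanged from the patch above */
}

Whatever predicate is used, pod_node[] is still filled from cpu_to_node(), so NUMA-aware allocations remain correct regardless of how the pods are grouped.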
