Skip to content

Commit fcecfa8

Browse files
committed
workqueue: Remove module param disable_numa and sysfs knobs pool_ids and numa
Unbound workqueue CPU affinity is going to receive an overhaul and the NUMA specific knobs won't make sense anymore. Remove them. Also, the pool_ids knob was used for debugging and not really meaningful given that there is no visibility into the pools associated with those IDs. Remove it too. A future patch will improve overall visibility. Signed-off-by: Tejun Heo <[email protected]>
1 parent 797e834 commit fcecfa8

File tree

2 files changed

+0
-82
lines changed

2 files changed

+0
-82
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -6992,15 +6992,6 @@
69926992
threshold repeatedly. They are likely good
69936993
candidates for using WQ_UNBOUND workqueues instead.
69946994

6995-
workqueue.disable_numa
6996-
By default, all work items queued to unbound
6997-
workqueues are affine to the NUMA nodes they're
6998-
issued on, which results in better behavior in
6999-
general. If NUMA affinity needs to be disabled for
7000-
whatever reason, this option can be used. Note
7001-
that this also can be controlled per-workqueue for
7002-
workqueues visible under /sys/bus/workqueue/.
7003-
70046995
workqueue.power_efficient
70056996
Per-cpu workqueues are generally preferred because
70066997
they show better performance thanks to cache

kernel/workqueue.c

Lines changed: 0 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -340,9 +340,6 @@ static cpumask_var_t *wq_numa_possible_cpumask;
340340
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
341341
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
342342

343-
static bool wq_disable_numa;
344-
module_param_named(disable_numa, wq_disable_numa, bool, 0444);
345-
346343
/* see the comment above the definition of WQ_POWER_EFFICIENT */
347344
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
348345
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
@@ -5794,10 +5791,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
57945791
*
57955792
* Unbound workqueues have the following extra attributes.
57965793
*
5797-
* pool_ids RO int : the associated pool IDs for each node
57985794
* nice RW int : nice value of the workers
57995795
* cpumask RW mask : bitmask of allowed CPUs for the workers
5800-
* numa RW bool : whether enable NUMA affinity
58015796
*/
58025797
struct wq_device {
58035798
struct workqueue_struct *wq;
@@ -5850,28 +5845,6 @@ static struct attribute *wq_sysfs_attrs[] = {
58505845
};
58515846
ATTRIBUTE_GROUPS(wq_sysfs);
58525847

5853-
static ssize_t wq_pool_ids_show(struct device *dev,
5854-
struct device_attribute *attr, char *buf)
5855-
{
5856-
struct workqueue_struct *wq = dev_to_wq(dev);
5857-
const char *delim = "";
5858-
int node, written = 0;
5859-
5860-
cpus_read_lock();
5861-
rcu_read_lock();
5862-
for_each_node(node) {
5863-
written += scnprintf(buf + written, PAGE_SIZE - written,
5864-
"%s%d:%d", delim, node,
5865-
unbound_pwq_by_node(wq, node)->pool->id);
5866-
delim = " ";
5867-
}
5868-
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5869-
rcu_read_unlock();
5870-
cpus_read_unlock();
5871-
5872-
return written;
5873-
}
5874-
58755848
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
58765849
char *buf)
58775850
{
@@ -5962,50 +5935,9 @@ static ssize_t wq_cpumask_store(struct device *dev,
59625935
return ret ?: count;
59635936
}
59645937

5965-
static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5966-
char *buf)
5967-
{
5968-
struct workqueue_struct *wq = dev_to_wq(dev);
5969-
int written;
5970-
5971-
mutex_lock(&wq->mutex);
5972-
written = scnprintf(buf, PAGE_SIZE, "%d\n",
5973-
!wq->unbound_attrs->no_numa);
5974-
mutex_unlock(&wq->mutex);
5975-
5976-
return written;
5977-
}
5978-
5979-
static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5980-
const char *buf, size_t count)
5981-
{
5982-
struct workqueue_struct *wq = dev_to_wq(dev);
5983-
struct workqueue_attrs *attrs;
5984-
int v, ret = -ENOMEM;
5985-
5986-
apply_wqattrs_lock();
5987-
5988-
attrs = wq_sysfs_prep_attrs(wq);
5989-
if (!attrs)
5990-
goto out_unlock;
5991-
5992-
ret = -EINVAL;
5993-
if (sscanf(buf, "%d", &v) == 1) {
5994-
attrs->no_numa = !v;
5995-
ret = apply_workqueue_attrs_locked(wq, attrs);
5996-
}
5997-
5998-
out_unlock:
5999-
apply_wqattrs_unlock();
6000-
free_workqueue_attrs(attrs);
6001-
return ret ?: count;
6002-
}
6003-
60045938
static struct device_attribute wq_sysfs_unbound_attrs[] = {
6005-
__ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
60065939
__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
60075940
__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
6008-
__ATTR(numa, 0644, wq_numa_show, wq_numa_store),
60095941
__ATTR_NULL,
60105942
};
60115943

@@ -6379,11 +6311,6 @@ static void __init wq_numa_init(void)
63796311
if (num_possible_nodes() <= 1)
63806312
return;
63816313

6382-
if (wq_disable_numa) {
6383-
pr_info("workqueue: NUMA affinity support disabled\n");
6384-
return;
6385-
}
6386-
63876314
for_each_possible_cpu(cpu) {
63886315
if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
63896316
pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);

0 commit comments

Comments (0)