Commit b6c3365

legionus (Alexey Gladkov) authored and ebiederm (Eric W. Biederman) committed
Use atomic_t for ucounts reference counting
The current implementation of the ucounts reference counter requires the
use of spin_lock. We're going to use get_ucounts() in more
performance-critical areas, such as the handling of RLIMIT_SIGPENDING.
With this change, spin_lock is needed only when the hashtable itself is
modified.

v10:
* Always try to put ucounts in case we cannot increase ucounts->count.
  This covers the case when all consumers return their ucounts at once.

v9:
* Use a negative value to check that ucounts->count is close to
  overflow.

Signed-off-by: Alexey Gladkov <[email protected]>
Link: https://lkml.kernel.org/r/94d1dbecab060a6b116b0a2d1accd8ca1bbb4f5f.1619094428.git.legion@kernel.org
Signed-off-by: Eric W. Biederman <[email protected]>
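Some context for the v9 note above: in the kernel, atomic_add_negative(1, v) atomically adds 1 to *v and returns true if the result is negative. Since the counter starts at 1 and only moves by one per get/put, the result first goes negative exactly when the count crosses INT_MAX, so a true return means "too close to overflow". A minimal, runnable userspace sketch of the same idea, using C11 atomics as a stand-in for the kernel's atomic_t (this is an illustration, not the kernel code):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count = 1;	/* analogue of ucounts->count */

static bool try_get(void)
{
	int old = atomic_fetch_add(&count, 1);

	/* Equivalent of atomic_add_negative(1, &count): did the
	 * counter go negative after the increment, i.e. did we just
	 * cross INT_MAX? */
	if (old == INT_MAX || old + 1 < 0) {
		/* Undo the speculative increment ("always try to put",
		 * per the v10 note) so a refused get leaves no trace. */
		atomic_fetch_sub(&count, 1);
		return false;
	}
	return true;
}

int main(void)
{
	printf("get at 1:       %s\n", try_get() ? "ok" : "refused");
	atomic_store(&count, INT_MAX);	/* simulate a saturated counter */
	printf("get at INT_MAX: %s\n", try_get() ? "ok" : "refused");
	return 0;
}

In the actual commit the undo goes through put_ucounts() rather than a bare decrement; per the v10 note, that covers the case where every other consumer returns its reference while the failed get is in flight, so the object is still unhashed and freed.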
1 parent 905ae01 commit b6c3365

File tree

2 files changed: +21 −36 lines changed

include/linux/user_namespace.h

Lines changed: 2 additions & 2 deletions
@@ -95,7 +95,7 @@ struct ucounts {
 	struct hlist_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	int count;
+	atomic_t count;
 	atomic_long_t ucount[UCOUNT_COUNTS];
 };
 
@@ -107,7 +107,7 @@ void retire_userns_sysctls(struct user_namespace *ns);
 struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid, enum ucount_type type);
 void dec_ucount(struct ucounts *ucounts, enum ucount_type type);
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid);
-struct ucounts *get_ucounts(struct ucounts *ucounts);
+struct ucounts * __must_check get_ucounts(struct ucounts *ucounts);
 void put_ucounts(struct ucounts *ucounts);
 
 #ifdef CONFIG_USER_NS
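The __must_check annotation is the other half of the interface change: get_ucounts() can now fail, returning NULL when the counter is too close to overflow, and the compiler will warn about any caller that discards the return value. A sketch of the caller pattern this enforces; the surrounding function and the error code are hypothetical, not part of this commit:

/* Hypothetical caller: every get must be checked and paired with a put. */
static int pin_user_counters(struct ucounts *ucounts)
{
	struct ucounts *uc = get_ucounts(ucounts);	/* may return NULL */

	if (!uc)
		return -EAGAIN;		/* illustrative error code */

	/* ... operate while holding the reference ... */

	put_ucounts(uc);
	return 0;
}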

kernel/ucount.c

Lines changed: 19 additions & 34 deletions
@@ -11,7 +11,7 @@
 struct ucounts init_ucounts = {
 	.ns    = &init_user_ns,
 	.uid   = GLOBAL_ROOT_UID,
-	.count = 1,
+	.count = ATOMIC_INIT(1),
 };
 
 #define UCOUNTS_HASHTABLE_BITS 10
@@ -139,6 +139,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
 	spin_unlock_irq(&ucounts_lock);
 }
 
+struct ucounts *get_ucounts(struct ucounts *ucounts)
+{
+	if (ucounts && atomic_add_negative(1, &ucounts->count)) {
+		put_ucounts(ucounts);
+		ucounts = NULL;
+	}
+	return ucounts;
+}
+
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
@@ -155,57 +164,33 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 
 		new->ns = ns;
 		new->uid = uid;
-		new->count = 0;
+		atomic_set(&new->count, 1);
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
 		if (ucounts) {
 			kfree(new);
 		} else {
 			hlist_add_head(&new->node, hashent);
-			ucounts = new;
+			spin_unlock_irq(&ucounts_lock);
+			return new;
 		}
 	}
-	if (ucounts->count == INT_MAX)
-		ucounts = NULL;
-	else
-		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
-	return ucounts;
-}
-
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
-	unsigned long flags;
-
-	if (!ucounts)
-		return NULL;
-
-	spin_lock_irqsave(&ucounts_lock, flags);
-	if (ucounts->count == INT_MAX) {
-		WARN_ONCE(1, "ucounts: counter has reached its maximum value");
-		ucounts = NULL;
-	} else {
-		ucounts->count += 1;
-	}
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
+	ucounts = get_ucounts(ucounts);
 	return ucounts;
 }
 
 void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ucounts_lock, flags);
-	ucounts->count -= 1;
-	if (!ucounts->count)
+	if (atomic_dec_and_test(&ucounts->count)) {
+		spin_lock_irqsave(&ucounts_lock, flags);
 		hlist_del_init(&ucounts->node);
-	else
-		ucounts = NULL;
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
-	kfree(ucounts);
+		spin_unlock_irqrestore(&ucounts_lock, flags);
+		kfree(ucounts);
+	}
 }
 
 static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
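Note how the rewritten put_ucounts() takes ucounts_lock only when atomic_dec_and_test() reports that the count reached zero; since exactly one caller can observe that transition, only that caller unhashes and frees the entry, and every other get/put pair completes with a single atomic operation. An illustrative lifecycle under the new scheme; the surrounding function is hypothetical, only alloc_ucounts/get_ucounts/put_ucounts come from this commit:

/* Hypothetical consumer of the new API, e.g. for the RLIMIT_SIGPENDING
 * handling the commit message anticipates. */
static int ucounts_lifecycle_demo(struct user_namespace *ns, kuid_t uid)
{
	struct ucounts *uc, *ref;

	uc = alloc_ucounts(ns, uid);	/* returns with a reference held */
	if (!uc)
		return -ENOMEM;

	ref = get_ucounts(uc);		/* pure atomic op, no spinlock */
	if (ref)
		put_ucounts(ref);	/* no lock unless count hits zero */

	put_ucounts(uc);	/* if this was the last reference, takes
				 * ucounts_lock, unhashes, and frees */
	return 0;
}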
