Commit a435b9a

Peter Zijlstra authored and committed
locking/refcount: Provide __refcount API to obtain the old value
David requested means to obtain the old/previous value from the refcount API for tracing purposes.

Duplicate (most of) the API as __refcount*() with an additional 'int *' argument into which, if !NULL, the old value will be stored.

Requested-by: David Howells <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 6eb6d05 commit a435b9a
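
For orientation, here is a minimal usage sketch of the new calling convention. It is not part of this commit: struct obj, obj_get() and the trace_printk() message are invented for illustration, and only the __refcount_inc_not_zero() signature is taken from the patch below.

#include <linux/kernel.h>
#include <linux/refcount.h>

struct obj {
        refcount_t ref;             /* hypothetical refcounted object */
};

/* Take a reference and report the old -> new transition, e.g. for tracing. */
static bool obj_get(struct obj *obj)
{
        int old;

        if (!__refcount_inc_not_zero(&obj->ref, &old))
                return false;       /* object already on its way out */

        trace_printk("%p: refcount %d -> %d\n", obj, old, old + 1);
        return true;
}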

File tree: 1 file changed (+57, -8 lines)

include/linux/refcount.h: 57 additions & 8 deletions

@@ -165,7 +165,7 @@ static inline unsigned int refcount_read(const refcount_t *r)
  *
  * Return: false if the passed refcount is 0, true otherwise
  */
-static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
 {
         int old = refcount_read(r);
 
@@ -174,12 +174,20 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
                 break;
         } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
 
+        if (oldp)
+                *oldp = old;
+
         if (unlikely(old < 0 || old + i < 0))
                 refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
 
         return old;
 }
 
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+        return __refcount_add_not_zero(i, r, NULL);
+}
+
 /**
  * refcount_add - add a value to a refcount
  * @i: the value to add to the refcount
@@ -196,16 +204,24 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  * cases, refcount_inc(), or one of its variants, should instead be used to
  * increment a reference count.
  */
-static inline void refcount_add(int i, refcount_t *r)
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
 {
         int old = atomic_fetch_add_relaxed(i, &r->refs);
 
+        if (oldp)
+                *oldp = old;
+
         if (unlikely(!old))
                 refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
         else if (unlikely(old < 0 || old + i < 0))
                 refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
 }
 
+static inline void refcount_add(int i, refcount_t *r)
+{
+        __refcount_add(i, r, NULL);
+}
+
 /**
  * refcount_inc_not_zero - increment a refcount unless it is 0
  * @r: the refcount to increment
@@ -219,9 +235,14 @@ static inline void refcount_add(int i, refcount_t *r)
  *
  * Return: true if the increment was successful, false otherwise
  */
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+        return __refcount_add_not_zero(1, r, oldp);
+}
+
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-        return refcount_add_not_zero(1, r);
+        return __refcount_inc_not_zero(r, NULL);
 }
 
 /**
@@ -236,9 +257,14 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  * Will WARN if the refcount is 0, as this represents a possible use-after-free
  * condition.
  */
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+        __refcount_add(1, r, oldp);
+}
+
 static inline void refcount_inc(refcount_t *r)
 {
-        refcount_add(1, r);
+        __refcount_inc(r, NULL);
 }
 
 /**
@@ -261,10 +287,13 @@ static inline void refcount_inc(refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
 {
         int old = atomic_fetch_sub_release(i, &r->refs);
 
+        if (oldp)
+                *oldp = old;
+
         if (old == i) {
                 smp_acquire__after_ctrl_dep();
                 return true;
@@ -276,6 +305,11 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
         return false;
 }
 
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
+{
+        return __refcount_sub_and_test(i, r, NULL);
+}
+
 /**
  * refcount_dec_and_test - decrement a refcount and test if it is 0
  * @r: the refcount
@@ -289,9 +323,14 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+        return __refcount_sub_and_test(1, r, oldp);
+}
+
 static inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-        return refcount_sub_and_test(1, r);
+        return __refcount_dec_and_test(r, NULL);
 }
 
 /**
@@ -304,12 +343,22 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-static inline void refcount_dec(refcount_t *r)
+static inline void __refcount_dec(refcount_t *r, int *oldp)
 {
-        if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
+        int old = atomic_fetch_sub_release(1, &r->refs);
+
+        if (oldp)
+                *oldp = old;
+
+        if (unlikely(old <= 1))
                 refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
 }
 
+static inline void refcount_dec(refcount_t *r)
+{
+        __refcount_dec(r, NULL);
+}
+
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
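
For completeness, a hedged sketch of the put side, again not from this commit: struct obj, obj_put() and the trace message are assumptions, while __refcount_dec_and_test() is the helper added above. Its oldp output is the pre-decrement value, so it reads 1 on the final put.

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t ref;             /* hypothetical refcounted object */
};

/* Drop a reference, reporting the transition, and free on the final put. */
static void obj_put(struct obj *obj)
{
        int old;

        if (__refcount_dec_and_test(&obj->ref, &old)) {
                /* old is the value before the decrement: the last reference. */
                trace_printk("%p: refcount %d -> 0, freeing\n", obj, old);
                kfree(obj);
        }
}

Note the design choice visible in the diff: the existing refcount_*() helpers become thin wrappers that pass NULL for oldp, so current callers are untouched, and since the argument is a compile-time NULL the if (oldp) store should be eliminated once the wrappers are inlined.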
