@@ -266,6 +266,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	return percpu_ref_tryget_many(ref, 1);
 }
 
+/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret = false;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (likely(__ref_is_percpu(ref, &percpu_count))) {
+		this_cpu_inc(*percpu_count);
+		ret = true;
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->data->count);
+	}
+	return ret;
+}
+
 /**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
@@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
 	rcu_read_lock();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->data->count);
-	}
-
+	ret = percpu_ref_tryget_live_rcu(ref);
 	rcu_read_unlock();
-
 	return ret;
 }