
Commit 2b55d6a

urezki authored and tehcaster committed
rcu/kvfree: Add kvfree_rcu_barrier() API
Add a kvfree_rcu_barrier() function. It waits until all in-flight pointers are freed over the RCU machinery. It does not wait for any grace-period completion and is within its rights to return immediately if there are no outstanding pointers.

This function is useful when there is a need to guarantee that memory is fully freed before destroying memory caches, for example, when unloading a kernel module.

Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
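As a usage illustration of the guarantee described in the message above, a minimal, hedged sketch of the module-unload case. The names (struct foo, foo_cachep, foo_release) are hypothetical and not part of this commit.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object freed through the two-argument kvfree_rcu(). */
struct foo {
	struct rcu_head rcu;
	/* ... payload ... */
};

static struct kmem_cache *foo_cachep;

static void foo_release(struct foo *f)
{
	/* Queues the pointer for freeing after a grace period; it may
	 * still be "in flight" when this call returns. */
	kvfree_rcu(f, rcu);
}

static void __exit foo_exit(void)
{
	/* Wait until every queued pointer has actually been freed, so
	 * no object can outlive its cache. */
	kvfree_rcu_barrier();
	kmem_cache_destroy(foo_cachep);
}
module_exit(foo_exit);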
1 parent 2eb14c1 commit 2b55d6a

File tree

3 files changed: +107 -8 lines changed

include/linux/rcutiny.h

Lines changed: 5 additions & 0 deletions

@@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
 	kvfree(ptr);
 }
 
+static inline void kvfree_rcu_barrier(void)
+{
+	rcu_barrier();
+}
+
 #ifdef CONFIG_KASAN_GENERIC
 void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 #else
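The Tiny-RCU mapping above is sound because, on Tiny-RCU kernels, every pointer queued by a double-argument kvfree_rcu() rides an ordinary call_rcu() callback, which rcu_barrier() already waits for, while the single-argument slow path frees synchronously before returning. A simplified sketch of that path, reconstructed from the surrounding context rather than quoted verbatim:

/* Simplified sketch of the Tiny-RCU kvfree path (not verbatim upstream
 * code). With a non-NULL head the pointer is freed via a plain
 * call_rcu() callback, so rcu_barrier() doubles as kvfree_rcu_barrier(). */
static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		/* The kvfree() offset is encoded in the callback pointer. */
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	/* Single-argument form: free synchronously; nothing stays
	 * in flight for a barrier to wait on. */
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

Tree RCU, by contrast, batches pointers in per-CPU caches outside the regular callback lists, so rcu_barrier() alone cannot see them; hence the prototype added below and the dedicated implementation in kernel/rcu/tree.c.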

include/linux/rcutree.h

Lines changed: 1 addition & 0 deletions

@@ -35,6 +35,7 @@ static inline void rcu_virt_note_context_switch(void)
 
 void synchronize_rcu_expedited(void);
 void kvfree_call_rcu(struct rcu_head *head, void *ptr);
+void kvfree_rcu_barrier(void);
 
 void rcu_barrier(void);
 void rcu_momentary_dyntick_idle(void);

kernel/rcu/tree.c

Lines changed: 101 additions & 8 deletions

@@ -3584,18 +3584,15 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
 }
 
 /*
- * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ * Return: %true if a work is queued, %false otherwise.
  */
-static void kfree_rcu_monitor(struct work_struct *work)
+static bool
+kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp)
 {
-	struct kfree_rcu_cpu *krcp = container_of(work,
-			struct kfree_rcu_cpu, monitor_work.work);
 	unsigned long flags;
+	bool queued = false;
 	int i, j;
 
-	// Drain ready for reclaim.
-	kvfree_rcu_drain_ready(krcp);
-
 	raw_spin_lock_irqsave(&krcp->lock, flags);
 
 	// Attempt to start a new batch.
@@ -3634,11 +3631,27 @@ static void kfree_rcu_monitor(struct work_struct *work)
 			// be that the work is in the pending state when
 			// channels have been detached following by each
 			// other.
-			queue_rcu_work(system_wq, &krwp->rcu_work);
+			queued = queue_rcu_work(system_wq, &krwp->rcu_work);
 		}
 	}
 
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+	return queued;
+}
+
+/*
+ * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
+ */
+static void kfree_rcu_monitor(struct work_struct *work)
+{
+	struct kfree_rcu_cpu *krcp = container_of(work,
+			struct kfree_rcu_cpu, monitor_work.work);
+
+	// Drain ready for reclaim.
+	kvfree_rcu_drain_ready(krcp);
+
+	// Queue a batch for a rest.
+	kvfree_rcu_queue_batch(krcp);
 
 	// If there is nothing to detach, it means that our job is
 	// successfully done here. In case of having at least one
@@ -3859,6 +3872,86 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+	struct kfree_rcu_cpu_work *krwp;
+	struct kfree_rcu_cpu *krcp;
+	bool queued;
+	int i, cpu;
+
+	/*
+	 * Firstly we detach objects and queue them over an RCU-batch
+	 * for all CPUs. Finally queued works are flushed for each CPU.
+	 *
+	 * Please note. If there are outstanding batches for a particular
+	 * CPU, those have to be finished first following by queuing a new.
+	 */
+	for_each_possible_cpu(cpu) {
+		krcp = per_cpu_ptr(&krc, cpu);
+
+		/*
+		 * Check if this CPU has any objects which have been queued for a
+		 * new GP completion. If not(means nothing to detach), we are done
+		 * with it. If any batch is pending/running for this "krcp", below
+		 * per-cpu flush_rcu_work() waits its completion(see last step).
+		 */
+		if (!need_offload_krc(krcp))
+			continue;
+
+		while (1) {
+			/*
+			 * If we are not able to queue a new RCU work it means:
+			 * - batches for this CPU are still in flight which should
+			 *   be flushed first and then repeat;
+			 * - no objects to detach, because of concurrency.
+			 */
+			queued = kvfree_rcu_queue_batch(krcp);
+
+			/*
+			 * Bail out, if there is no need to offload this "krcp"
+			 * anymore. As noted earlier it can run concurrently.
+			 */
+			if (queued || !need_offload_krc(krcp))
+				break;
+
+			/* There are ongoing batches. */
+			for (i = 0; i < KFREE_N_BATCHES; i++) {
+				krwp = &(krcp->krw_arr[i]);
+				flush_rcu_work(&krwp->rcu_work);
+			}
+		}
+	}
+
+	/*
+	 * Now we guarantee that all objects are flushed.
+	 */
+	for_each_possible_cpu(cpu) {
+		krcp = per_cpu_ptr(&krc, cpu);
+
+		/*
+		 * A monitor work can drain ready to reclaim objects
+		 * directly. Wait its completion if running or pending.
+		 */
+		cancel_delayed_work_sync(&krcp->monitor_work);
+
+		for (i = 0; i < KFREE_N_BATCHES; i++) {
+			krwp = &(krcp->krw_arr[i]);
+			flush_rcu_work(&krwp->rcu_work);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
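The kernel-doc caveat above is worth making concrete: the barrier covers queued (two-argument) calls, while the single-argument form frees synchronously before returning and therefore only needs to have returned before the cache is destroyed. A hedged sketch of the contrast; struct bar is hypothetical, and the single-argument form is spelled kvfree_rcu_mightsleep() in kernels of this vintage:

struct bar {
	struct rcu_head rcu;
};

static void bar_free_async(struct bar *b)
{
	/* Two-argument form: the pointer becomes "in flight" and is
	 * covered by a later kvfree_rcu_barrier(). */
	kvfree_rcu(b, rcu);
}

static void bar_free_sync(struct bar *b)
{
	/* Single-argument form: sleeps in synchronize_rcu() and frees
	 * the pointer before returning, so the barrier has nothing to
	 * wait for; the caller must simply have returned before
	 * kmem_cache_destroy(). */
	kvfree_rcu_mightsleep(b);
}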
