@@ -130,6 +130,7 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 #ifdef CONFIG_MEMCG_KMEM
 
 LIST_HEAD(slab_root_caches);
+static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
 
 void slab_init_memcg_params(struct kmem_cache *s)
 {
@@ -734,14 +735,22 @@ static void kmemcg_cache_deactivate(struct kmem_cache *s)
 
 	__kmemcg_cache_deactivate(s);
 
+	/*
+	 * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
+	 * flag and make sure that no new kmem_cache deactivation tasks
+	 * are queued (see flush_memcg_workqueue()).
+	 */
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	if (s->memcg_params.root_cache->memcg_params.dying)
-		return;
+		goto unlock;
 
 	/* pin memcg so that @s doesn't get destroyed in the middle */
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.work_fn = __kmemcg_cache_deactivate_after_rcu;
 	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
+unlock:
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -851,9 +860,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 
 static void flush_memcg_workqueue(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
+	spin_lock_irq(&memcg_kmem_wq_lock);
 	s->memcg_params.dying = true;
-	mutex_unlock(&slab_mutex);
+	spin_unlock_irq(&memcg_kmem_wq_lock);
 
 	/*
 	 * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
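For illustration only, here is a minimal userspace sketch of the synchronization pattern the patch introduces: the dying flag is both set and tested under the same spinlock, and new deactivation work is only queued inside that critical section, so once flush_memcg_workqueue() has set the flag no new work can appear. The names (cache_state, queue_deactivation, flush_and_mark_dying) are invented for the sketch and stand in for the kernel's memcg_params.dying, kmemcg_cache_deactivate() and flush_memcg_workqueue().

/*
 * Illustrative model of the memcg_kmem_wq_lock pattern; not kernel code.
 * The "dying" flag is set and tested under one spinlock, and work is
 * queued inside the same critical section as the test.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t wq_lock;

struct cache_state {
	bool dying;		/* models memcg_params.dying */
	int queued_work;	/* models pending deactivation work */
};

/* models kmemcg_cache_deactivate(): only queue work if not dying */
static void queue_deactivation(struct cache_state *s)
{
	pthread_spin_lock(&wq_lock);
	if (s->dying)
		goto unlock;	/* too late: the cache is being destroyed */
	s->queued_work++;	/* stands in for call_rcu()/queue_work() */
unlock:
	pthread_spin_unlock(&wq_lock);
}

/* models flush_memcg_workqueue(): after this, no new work can be queued */
static void flush_and_mark_dying(struct cache_state *s)
{
	pthread_spin_lock(&wq_lock);
	s->dying = true;
	pthread_spin_unlock(&wq_lock);
	/* ...then wait for already-queued work (rcu_barrier()/flush_workqueue()) */
}

int main(void)
{
	struct cache_state s = { 0 };

	pthread_spin_init(&wq_lock, PTHREAD_PROCESS_PRIVATE);
	queue_deactivation(&s);		/* queued: dying not yet set */
	flush_and_mark_dying(&s);
	queue_deactivation(&s);		/* rejected: dying already set */
	printf("queued work items: %d\n", s.queued_work);	/* prints 1 */
	pthread_spin_destroy(&wq_lock);
	return 0;
}

This mirrors the design choice in the patch: the test of dying and the queueing of deferred deactivation must happen atomically with respect to the flag being set, which is why the check in kmemcg_cache_deactivate() moves under memcg_kmem_wq_lock instead of relying on slab_mutex.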