@@ -606,7 +606,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  * by this lock, but we want to avoid concurrent calls for performance
  * reasons and to make pcpu_get_vm_areas() more deterministic.
  */
-static DEFINE_SPINLOCK(vmap_purge_lock);
+static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
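The declaration change above is the heart of the conversion: a spinlock holder runs atomically and must not sleep, while a mutex is a sleeping lock, so the serialized purge path may now perform blocking work. A minimal kernel-style sketch of the two contracts, illustrative only (s_lock, m_lock, and both functions are made-up names, not part of this diff):

#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(s_lock);         /* holder must stay atomic */
static DEFINE_MUTEX(m_lock);            /* holder may block */

static void spinlock_section(void)
{
        spin_lock(&s_lock);
        /* no sleeping here: no GFP_KERNEL allocation, no mutex_lock() */
        spin_unlock(&s_lock);
}

static void mutex_section(void)
{
        mutex_lock(&m_lock);            /* acquiring may itself sleep */
        /* blocking work is fine: allocations, waits, rescheduling */
        mutex_unlock(&m_lock);
}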
@@ -660,9 +660,9 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
  */
 static void try_purge_vmap_area_lazy(void)
 {
-        if (spin_trylock(&vmap_purge_lock)) {
+        if (mutex_trylock(&vmap_purge_lock)) {
                 __purge_vmap_area_lazy(ULONG_MAX, 0);
-                spin_unlock(&vmap_purge_lock);
+                mutex_unlock(&vmap_purge_lock);
         }
 }
 
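try_purge_vmap_area_lazy() keeps its opportunistic shape after the conversion: if someone else already holds vmap_purge_lock, a purge is in progress and this caller can simply skip. A runnable userspace analogue of the trylock-or-skip pattern using pthreads (all names here are hypothetical, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for __purge_vmap_area_lazy(); may take a while */
static void do_purge(void)
{
        puts("purging");
}

/* trylock-or-skip: if another thread is purging, do not wait for it */
static void try_purge(void)
{
        if (pthread_mutex_trylock(&purge_lock) == 0) {  /* 0 == acquired */
                do_purge();
                pthread_mutex_unlock(&purge_lock);
        }
}

int main(void)
{
        try_purge();
        return 0;
}

Note the inverted convention: the kernel's mutex_trylock() returns 1 when the lock was taken, while pthread_mutex_trylock() returns 0 on success and EBUSY when contended.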
@@ -671,10 +671,10 @@ static void try_purge_vmap_area_lazy(void)
  */
 static void purge_vmap_area_lazy(void)
 {
-        spin_lock(&vmap_purge_lock);
+        mutex_lock(&vmap_purge_lock);
         purge_fragmented_blocks_allcpus();
         __purge_vmap_area_lazy(ULONG_MAX, 0);
-        spin_unlock(&vmap_purge_lock);
+        mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1063,11 +1063,11 @@ void vm_unmap_aliases(void)
                 rcu_read_unlock();
         }
 
-        spin_lock(&vmap_purge_lock);
+        mutex_lock(&vmap_purge_lock);
         purge_fragmented_blocks_allcpus();
         if (!__purge_vmap_area_lazy(start, end) && flush)
                 flush_tlb_kernel_range(start, end);
-        spin_unlock(&vmap_purge_lock);
+        mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
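One consequence of the conversion: every path reaching these lock sites must now run in process context, since mutex_lock() can sleep and even mutex_trylock(), unlike spin_trylock(), may not be called from interrupt context. Because vm_unmap_aliases() is exported, out-of-tree callers inherit that requirement too. A hedged kernel-style sketch of how such a caller could document the constraint (might_sleep() is the standard annotation; this function is illustrative, not from the commit):

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_purge_lock);

/* illustrative caller: documents and debug-checks its sleeping context */
static void example_flush(void)
{
        might_sleep();  /* warns in debug kernels if called atomically */

        mutex_lock(&example_purge_lock);
        /* ... blocking flush work serialized by the mutex ... */
        mutex_unlock(&example_purge_lock);
}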