@@ -151,11 +151,6 @@ static struct pcpu_chunk *pcpu_first_chunk;
 static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
-/*
- * Free path accesses and alters only the index data structures and can be
- * safely called from atomic context. When memory needs to be returned to
- * the system, free path schedules reclaim_work.
- */
 static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
 
@@ -727,20 +722,21 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
  * @size: size of area to allocate in bytes
  * @align: alignment of area (max PAGE_SIZE)
  * @reserved: allocate from the reserved chunk if available
+ * @gfp: allocation flags
  *
- * Allocate percpu area of @size bytes aligned at @align.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
+ * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
+ * contain %GFP_KERNEL, the allocation is atomic.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+				 gfp_t gfp)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
+	bool is_atomic = !(gfp & GFP_KERNEL);
 	int slot, off, new_alloc, cpu, ret;
 	unsigned long flags;
 	void __percpu *ptr;
@@ -773,14 +769,15 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 
 	while ((new_alloc = pcpu_need_to_extend(chunk))) {
 		spin_unlock_irqrestore(&pcpu_lock, flags);
-		if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+		if (is_atomic ||
+		    pcpu_extend_area_map(chunk, new_alloc) < 0) {
 			err = "failed to extend area map of reserved chunk";
 			goto fail;
 		}
 		spin_lock_irqsave(&pcpu_lock, flags);
 	}
 
-	off = pcpu_alloc_area(chunk, size, align, false);
+	off = pcpu_alloc_area(chunk, size, align, is_atomic);
 	if (off >= 0)
 		goto area_found;
 
@@ -797,6 +794,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 
 			new_alloc = pcpu_need_to_extend(chunk);
 			if (new_alloc) {
+				if (is_atomic)
+					continue;
 				spin_unlock_irqrestore(&pcpu_lock, flags);
 				if (pcpu_extend_area_map(chunk,
 							 new_alloc) < 0) {
@@ -811,7 +810,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 				goto restart;
 			}
 
-			off = pcpu_alloc_area(chunk, size, align, false);
+			off = pcpu_alloc_area(chunk, size, align, is_atomic);
 			if (off >= 0)
 				goto area_found;
 		}
@@ -824,6 +823,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	 * tasks to create chunks simultaneously. Serialize and create iff
 	 * there's still no empty chunk after grabbing the mutex.
 	 */
+	if (is_atomic)
+		goto fail;
+
 	mutex_lock(&pcpu_alloc_mutex);
 
 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
@@ -846,7 +848,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
 	/* populate if not all pages are already there */
-	if (true) {
+	if (!is_atomic) {
 		int page_start, page_end, rs, re;
 
 		mutex_lock(&pcpu_alloc_mutex);
@@ -884,9 +886,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 fail_unlock:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 fail:
-	if (warn_limit) {
-		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
-			   "%s\n", size, align, err);
+	if (!is_atomic && warn_limit) {
+		pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
+			   size, align, is_atomic, err);
 		dump_stack();
 		if (!--warn_limit)
 			pr_info("PERCPU: limit reached, disable warning\n");
@@ -895,22 +897,34 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 }
 
 /**
- * __alloc_percpu - allocate dynamic percpu area
+ * __alloc_percpu_gfp - allocate dynamic percpu area
  * @size: size of area to allocate in bytes
  * @align: alignment of area (max PAGE_SIZE)
+ * @gfp: allocation flags
  *
- * Allocate zero-filled percpu area of @size bytes aligned at @align.
- * Might sleep. Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
+ * Allocate zero-filled percpu area of @size bytes aligned at @align. If
+ * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
+ * be called from any context but is a lot more likely to fail.
  *
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
+void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
+{
+	return pcpu_alloc(size, align, false, gfp);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
+
+/**
+ * __alloc_percpu - allocate dynamic percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
+ */
 void __percpu *__alloc_percpu(size_t size, size_t align)
 {
-	return pcpu_alloc(size, align, false);
+	return pcpu_alloc(size, align, false, GFP_KERNEL);
 }
 EXPORT_SYMBOL_GPL(__alloc_percpu);
 
@@ -932,7 +946,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
  */
 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
-	return pcpu_alloc(size, align, true);
+	return pcpu_alloc(size, align, true, GFP_KERNEL);
 }
 
 /**
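
For illustration only, not part of the patch: a minimal sketch of how a caller might use the new __alloc_percpu_gfp() interface from a context that cannot sleep. The foo_stats structure and helper name below are hypothetical; only __alloc_percpu_gfp(), GFP_NOWAIT and free_percpu() come from the kernel API itself. Without %GFP_KERNEL in @gfp the allocation takes the atomic path (no area-map extension, no chunk creation, no page population), so it is more likely to fail and the NULL return must be handled.

#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* hypothetical per-CPU counter structure, for illustration only */
struct foo_stats {
	u64	packets;
	u64	bytes;
};

/* may be called with a spinlock held or from softirq context */
static struct foo_stats __percpu *foo_stats_alloc_atomic(void)
{
	/* no GFP_KERNEL => pcpu_alloc() treats this as an atomic request */
	return __alloc_percpu_gfp(sizeof(struct foo_stats),
				  __alignof__(struct foo_stats), GFP_NOWAIT);
}

The sleeping variant is unchanged: __alloc_percpu(size, align) is now simply __alloc_percpu_gfp(size, align, GFP_KERNEL), and either allocation is released with free_percpu().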