@@ -1078,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * @bits: the bits to set in this range
  * @clear_bits: the bits to clear in this range
  * @cached_state: state that we're going to cache
- * @mask: the allocation mask
  *
  * This will go through and set bits for the given range. If any states exist
  * already in this range they are set with the given bit and cleared of the
  * clear_bits. This is only meant to be used by things that are mergeable, ie
  * converting from say DELALLOC to DIRTY. This is not meant to be used with
  * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
                        unsigned bits, unsigned clear_bits,
-                       struct extent_state **cached_state, gfp_t mask)
+                       struct extent_state **cached_state)
 {
         struct extent_state *state;
         struct extent_state *prealloc = NULL;
@@ -1103,15 +1104,15 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
-        if (!prealloc && gfpflags_allow_blocking(mask)) {
+        if (!prealloc) {
                 /*
                  * Best effort, don't worry if extent state allocation fails
                  * here for the first iteration. We might have a cached state
                  * that matches exactly the target range, in which case no
                  * extent state allocations are needed. We'll only know this
                  * after locking the tree.
                  */
-                prealloc = alloc_extent_state(mask);
+                prealloc = alloc_extent_state(GFP_NOFS);
                 if (!prealloc && !first_iteration)
                         return -ENOMEM;
         }
@@ -1272,8 +1273,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         if (start > end)
                 goto out;
         spin_unlock(&tree->lock);
-        if (gfpflags_allow_blocking(mask))
-                cond_resched();
+        cond_resched();
         first_iteration = false;
         goto again;
 }
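
For context, a sketch of how a call site changes with this commit. The io_tree variable, the range, and the bit choice here are illustrative only, not taken from a real caller; the DELALLOC-to-DIRTY conversion is the example the kernel-doc comment above gives:

        struct extent_state *cached_state = NULL;
        int ret;

        /* Before this commit: the caller chose the allocation mask. */
        ret = convert_extent_bit(io_tree, start, end, EXTENT_DIRTY,
                                 EXTENT_DELALLOC, &cached_state, GFP_NOFS);

        /* After this commit: GFP_NOFS is implied and the argument is gone. */
        ret = convert_extent_bit(io_tree, start, end, EXTENT_DIRTY,
                                 EXTENT_DELALLOC, &cached_state);

The gfpflags_allow_blocking() guards are dropped rather than replaced because GFP_NOFS includes __GFP_DIRECT_RECLAIM, so the predicate was always true once the mask became fixed; cond_resched() and the preallocation attempt can therefore run unconditionally.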