@@ -3059,16 +3059,19 @@ static void update_balance_args(struct btrfs_balance_control *bctl)
 	 * (albeit full) chunks.
 	 */
 	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
 	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
 		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
 		bctl->data.usage = 90;
 	}
 	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
 	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
 		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
 		bctl->sys.usage = 90;
 	}
 	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
 	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
 		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
 		bctl->meta.usage = 90;
@@ -3122,6 +3125,39 @@ static int chunk_profiles_filter(u64 chunk_type,
 
 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
 			      struct btrfs_balance_args *bargs)
+{
+	struct btrfs_block_group_cache *cache;
+	u64 chunk_used;
+	u64 user_thresh_min;
+	u64 user_thresh_max;
+	int ret = 1;
+
+	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+	chunk_used = btrfs_block_group_used(&cache->item);
+
+	if (bargs->usage_min == 0)
+		user_thresh_min = 0;
+	else
+		user_thresh_min = div_factor_fine(cache->key.offset,
+					bargs->usage_min);
+
+	if (bargs->usage_max == 0)
+		user_thresh_max = 1;
+	else if (bargs->usage_max > 100)
+		user_thresh_max = cache->key.offset;
+	else
+		user_thresh_max = div_factor_fine(cache->key.offset,
+					bargs->usage_max);
+
+	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
+		ret = 0;
+
+	btrfs_put_block_group(cache);
+	return ret;
+}
+
+static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info,
+		u64 chunk_offset, struct btrfs_balance_args *bargs)
 {
 	struct btrfs_block_group_cache *cache;
 	u64 chunk_used, user_thresh;
@@ -3130,7 +3166,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
 	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 	chunk_used = btrfs_block_group_used(&cache->item);
 
-	if (bargs->usage == 0)
+	if (bargs->usage_min == 0)
 		user_thresh = 1;
 	else if (bargs->usage > 100)
 		user_thresh = cache->key.offset;
@@ -3279,6 +3315,9 @@ static int should_balance_chunk(struct btrfs_root *root,
 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
 	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
 		return 0;
+	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
+	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
+		return 0;
 	}
 
 	/* devid filter */
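
For orientation only, not part of the patch: a minimal user-space sketch of the usage_min/usage_max threshold arithmetic added above, assuming `div_factor_fine()` reduces to the plain `num * factor / 100` it computes in the kernel, and using made-up chunk values. In btrfs-progs the matching filter is expressed as a range, e.g. `usage=10..50`.

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel helper: num * factor / 100 (overflow ignored here). */
static uint64_t div_factor_fine(uint64_t num, uint64_t factor)
{
	return num * factor / 100;
}

/*
 * Mirrors the usage_min/usage_max logic above: returns 0 (chunk selected for
 * balancing) when the used bytes fall inside [min%, max%) of the chunk size,
 * and 1 (chunk skipped) otherwise.
 */
static int usage_range_filter(uint64_t chunk_size, uint64_t chunk_used,
			      uint32_t usage_min, uint32_t usage_max)
{
	uint64_t thresh_min, thresh_max;

	if (usage_min == 0)
		thresh_min = 0;
	else
		thresh_min = div_factor_fine(chunk_size, usage_min);

	if (usage_max == 0)
		thresh_max = 1;
	else if (usage_max > 100)
		thresh_max = chunk_size;
	else
		thresh_max = div_factor_fine(chunk_size, usage_max);

	return (thresh_min <= chunk_used && chunk_used < thresh_max) ? 0 : 1;
}

int main(void)
{
	const uint64_t GiB = 1024ULL * 1024 * 1024;
	const uint64_t used = 30 * GiB / 100;	/* a 1 GiB chunk that is 30% full */

	printf("10..50 -> %s\n", usage_range_filter(GiB, used, 10, 50) ? "skip" : "balance");
	printf("50..90 -> %s\n", usage_range_filter(GiB, used, 50, 90) ? "skip" : "balance");
	return 0;
}
```

With these inputs, a 1 GiB chunk that is 30% full is selected by `usage=10..50` and skipped by `usage=50..90`, matching the half-open [min, max) check in the patch.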