@@ -2657,7 +2657,8 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 	struct btrfs_block_rsv *global_rsv;
 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
 	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
-	u64 num_bytes;
+	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
+	u64 num_bytes, num_dirty_bgs_bytes;
 	int ret = 0;
 
 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
@@ -2666,17 +2667,21 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 	num_bytes += (num_heads - 1) * root->nodesize;
 	num_bytes <<= 1;
 	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
+	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
+							     num_dirty_bgs);
 	global_rsv = &root->fs_info->global_block_rsv;
 
 	/*
 	 * If we can't allocate any more chunks lets make sure we have _lots_ of
 	 * wiggle room since running delayed refs can create more delayed refs.
 	 */
-	if (global_rsv->space_info->full)
+	if (global_rsv->space_info->full) {
+		num_dirty_bgs_bytes <<= 1;
 		num_bytes <<= 1;
+	}
 
 	spin_lock(&global_rsv->lock);
-	if (global_rsv->reserved <= num_bytes)
+	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
 		ret = 1;
 	spin_unlock(&global_rsv->lock);
 	return ret;
@@ -5408,6 +5413,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	if (list_empty(&cache->dirty_list)) {
 		list_add_tail(&cache->dirty_list,
 			      &trans->transaction->dirty_bgs);
+		trans->transaction->num_dirty_bgs++;
 		btrfs_get_block_group(cache);
 	}
 	spin_unlock(&trans->transaction->dirty_bgs_lock);
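
To see what the new accounting buys, here is a minimal user-space sketch of the patched check. It is a model under stated assumptions, not kernel code: metadata_size_for_items() is a hypothetical stand-in for btrfs_calc_trans_metadata_size(), NODESIZE is an assumed node size, and the csum-leaf term from the real function is omitted for brevity. The point it illustrates is that each block group dirtied in update_block_group() now adds one metadata item's worth of reservation to the threshold, so a transaction with many dirty block groups trips the "flush delayed refs first" path before the global reserve is exhausted.

/*
 * Standalone sketch of the patched reservation check -- not kernel code.
 * metadata_size_for_items() and NODESIZE are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>

#define NODESIZE 16384ULL	/* assumed btree node size */

/* Stand-in for btrfs_calc_trans_metadata_size(root, nr): model each
 * item as dirtying a full three-level btree path. */
static uint64_t metadata_size_for_items(uint64_t nr)
{
	return 3 * NODESIZE * nr;
}

/*
 * Mirrors the patched btrfs_check_space_for_delayed_refs(): on top of
 * the bytes needed to run the delayed ref heads, reserve one metadata
 * item's worth of space per dirty block group, and double both terms
 * when no new chunks can be allocated.
 */
static int check_space(uint64_t reserved, uint64_t num_heads,
		       uint64_t num_dirty_bgs, int space_full)
{
	uint64_t num_bytes = metadata_size_for_items(1);
	uint64_t num_dirty_bgs_bytes;

	num_bytes += (num_heads - 1) * NODESIZE;
	num_bytes <<= 1;
	num_dirty_bgs_bytes = metadata_size_for_items(num_dirty_bgs);

	if (space_full) {
		num_dirty_bgs_bytes <<= 1;
		num_bytes <<= 1;
	}
	/* 1 means "not enough room in the global reserve, flush first" */
	return reserved <= num_bytes + num_dirty_bgs_bytes;
}

int main(void)
{
	/* 4 ref heads, 10 dirty block groups, 1 MiB reserved, space full */
	printf("%d\n", check_space(1024 * 1024, 4, 10, 1));
	return 0;
}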