@@ -1998,43 +1998,39 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
  *
  * Return 0 for success insert
  * Return >0 for existing record, caller can free @record safely.
- * Error is not possible
+ * Return <0 for insertion failure, caller can free @record safely.
  */
 int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
				     struct btrfs_delayed_ref_root *delayed_refs,
				     struct btrfs_qgroup_extent_record *record)
 {
-	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
-	struct rb_node *parent_node = NULL;
-	struct btrfs_qgroup_extent_record *entry;
-	u64 bytenr = record->bytenr;
+	struct btrfs_qgroup_extent_record *existing, *ret;
+	unsigned long bytenr = record->bytenr;
 
 	if (!btrfs_qgroup_full_accounting(fs_info))
 		return 1;
 
 	lockdep_assert_held(&delayed_refs->lock);
 	trace_btrfs_qgroup_trace_extent(fs_info, record);
 
-	while (*p) {
-		parent_node = *p;
-		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
-				 node);
-		if (bytenr < entry->bytenr) {
-			p = &(*p)->rb_left;
-		} else if (bytenr > entry->bytenr) {
-			p = &(*p)->rb_right;
-		} else {
-			if (record->data_rsv && !entry->data_rsv) {
-				entry->data_rsv = record->data_rsv;
-				entry->data_rsv_refroot =
-					record->data_rsv_refroot;
-			}
-			return 1;
+	xa_lock(&delayed_refs->dirty_extents);
+	existing = xa_load(&delayed_refs->dirty_extents, bytenr);
+	if (existing) {
+		if (record->data_rsv && !existing->data_rsv) {
+			existing->data_rsv = record->data_rsv;
+			existing->data_rsv_refroot = record->data_rsv_refroot;
 		}
+		xa_unlock(&delayed_refs->dirty_extents);
+		return 1;
+	}
+
+	ret = __xa_store(&delayed_refs->dirty_extents, record->bytenr, record, GFP_ATOMIC);
+	xa_unlock(&delayed_refs->dirty_extents);
+	if (xa_is_err(ret)) {
+		qgroup_mark_inconsistent(fs_info);
+		return xa_err(ret);
 	}
 
-	rb_link_node(&record->node, parent_node, p);
-	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
 	return 0;
 }
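The rewritten insert path follows a standard xarray idiom: take the array's internal spinlock with xa_lock(), probe with xa_load(), and only store with __xa_store() under GFP_ATOMIC, since no sleeping allocation is allowed while the lock is held. A minimal self-contained sketch of that idiom (struct item, track_item, and the items array are illustrative names, not part of the patch):

#include <linux/xarray.h>
#include <linux/slab.h>

struct item {
	unsigned long key;
};

static DEFINE_XARRAY(items);

/* Return 0 on insert, 1 if the key is already present, <0 on error. */
static int track_item(struct item *new)
{
	struct item *existing;
	void *ret;

	xa_lock(&items);
	existing = xa_load(&items, new->key);
	if (existing) {
		xa_unlock(&items);
		return 1;
	}
	/* GFP_ATOMIC: we must not sleep while holding xa_lock(). */
	ret = __xa_store(&items, new->key, new, GFP_ATOMIC);
	xa_unlock(&items);
	return xa_err(ret);	/* 0 on success, negative errno on failure */
}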
@@ -2141,6 +2137,11 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 	if (!record)
 		return -ENOMEM;
 
+	if (xa_reserve(&trans->transaction->delayed_refs.dirty_extents, bytenr, GFP_NOFS)) {
+		kfree(record);
+		return -ENOMEM;
+	}
+
 	delayed_refs = &trans->transaction->delayed_refs;
 	record->bytenr = bytenr;
 	record->num_bytes = num_bytes;
@@ -2149,7 +2150,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
 	spin_lock(&delayed_refs->lock);
 	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 	spin_unlock(&delayed_refs->lock);
-	if (ret > 0) {
+	if (ret) {
+		/* Clean up if insertion fails or item exists. */
+		xa_release(&delayed_refs->dirty_extents, record->bytenr);
 		kfree(record);
 		return 0;
 	}
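Taken together, the two hunks above form a reserve-then-insert protocol: xa_reserve() preallocates the slot while the caller may still sleep (GFP_NOFS), so the later __xa_store() under the spinlock cannot fail for lack of memory, and xa_release() hands the slot back when the record turns out to be a duplicate or the insert fails (xa_release() is a no-op if the slot was actually stored to). A hedged sketch of the same shape, with invented names:

#include <linux/xarray.h>
#include <linux/slab.h>

static DEFINE_XARRAY(items);

/* Sketch only: 'payload' stands in for the qgroup extent record. */
static int track_reserved(unsigned long key, void *payload)
{
	int ret;

	/* May sleep here, so xarray node allocation happens up front. */
	ret = xa_reserve(&items, key, GFP_NOFS);
	if (ret)
		return ret;

	xa_lock(&items);
	if (xa_load(&items, key)) {
		ret = 1;		/* duplicate */
	} else {
		/* The reservation makes this store allocation-free. */
		ret = xa_err(__xa_store(&items, key, payload, GFP_ATOMIC));
	}
	xa_unlock(&items);

	if (ret)
		xa_release(&items, key);	/* give back the unused slot */
	return ret;
}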
@@ -3018,7 +3021,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 	struct btrfs_qgroup_extent_record *record;
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct ulist *new_roots = NULL;
-	struct rb_node *node;
+	unsigned long index;
 	u64 num_dirty_extents = 0;
 	u64 qgroup_to_skip;
 	int ret = 0;
@@ -3028,10 +3031,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 
 	delayed_refs = &trans->transaction->delayed_refs;
 	qgroup_to_skip = delayed_refs->qgroup_to_skip;
-	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
-		record = rb_entry(node, struct btrfs_qgroup_extent_record,
-				  node);
-
+	xa_for_each(&delayed_refs->dirty_extents, index, record) {
 		num_dirty_extents++;
 		trace_btrfs_qgroup_account_extents(fs_info, record);
 
@@ -3097,7 +3097,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 		ulist_free(record->old_roots);
 		ulist_free(new_roots);
 		new_roots = NULL;
-		rb_erase(node, &delayed_refs->dirty_extent_root);
+		xa_erase(&delayed_refs->dirty_extents, index);
 		kfree(record);
 
 	}
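Unlike the old rb_first()/rb_erase() loop, xa_for_each() tolerates removal of the entry it is currently visiting, because the iterator carries only the index rather than a pointer into the structure; that is what lets the accounting loop above erase each record in place. An illustrative sketch of this drain pattern (drain_items is a made-up name):

#include <linux/xarray.h>
#include <linux/slab.h>

/* Drain every entry from @xa, freeing each payload as we go. */
static void drain_items(struct xarray *xa)
{
	void *rec;
	unsigned long index;

	xa_for_each(xa, index, rec) {
		/* ... process 'rec' here ... */
		xa_erase(xa, index);	/* safe during xa_for_each() */
		kfree(rec);
	}
}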
@@ -4874,15 +4874,13 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
 {
 	struct btrfs_qgroup_extent_record *entry;
-	struct btrfs_qgroup_extent_record *next;
-	struct rb_root *root;
+	unsigned long index;
 
-	root = &trans->delayed_refs.dirty_extent_root;
-	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+	xa_for_each(&trans->delayed_refs.dirty_extents, index, entry) {
 		ulist_free(entry->old_roots);
 		kfree(entry);
 	}
-	*root = RB_ROOT;
+	xa_destroy(&trans->delayed_refs.dirty_extents);
 }
 
 void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes)
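One detail worth noting in the destructor above: xa_destroy() frees the xarray's internal nodes but not the entries themselves, which is why the patch keeps the xa_for_each() pass to free each record before destroying the array (the counterpart of the old *root = RB_ROOT reset). A minimal sketch of that teardown order, with invented names:

#include <linux/xarray.h>
#include <linux/slab.h>

static void destroy_items(struct xarray *xa)
{
	void *entry;
	unsigned long index;

	/* xa_destroy() does not free entries, so free payloads first. */
	xa_for_each(xa, index, entry)
		kfree(entry);
	/* Then release the xarray's internal bookkeeping nodes. */
	xa_destroy(xa);
}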