@@ -3476,6 +3476,7 @@ enum btrfs_extent_allocation_policy {
  */
 struct find_free_extent_ctl {
 	/* Basic allocation info */
+	u64 ram_bytes;
 	u64 num_bytes;
 	u64 empty_size;
 	u64 flags;
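The struct gains ram_bytes so that every caller-supplied input to the allocator lives in one place. In btrfs, ram_bytes is the uncompressed size of the extent while num_bytes is the size actually reserved on disk; find_free_extent() only forwards ram_bytes to btrfs_add_reserved_bytes(). For orientation, here is a sketch of the fields this patch touches, split into caller inputs and allocator scratch state. It assumes the usual kernel types (u64, bool, etc.), and the member comments and grouping are inferred from the hunks below rather than copied from fs/btrfs/extent-tree.c, so treat it as illustrative:

struct find_free_extent_ctl {
	/* Inputs, filled in by the caller (btrfs_reserve_extent) */
	u64 ram_bytes;		/* uncompressed size, for reservation accounting */
	u64 num_bytes;		/* on-disk size to allocate */
	u64 empty_size;
	u64 flags;		/* target block group profile bits */
	int delalloc;
	u64 hint_byte;		/* preferred start of the search */
	bool for_treelog;

	/* Scratch state, (re)initialized by find_free_extent() itself */
	u64 search_start;
	u64 empty_cluster;
	struct btrfs_free_cluster *last_ptr;
	bool use_cluster;
	bool have_caching_bg;
	bool orig_have_caching_bg;
	int index;		/* RAID index derived from flags */
	int loop;
	bool retry_clustered;
	bool retry_unclustered;
	int cached;
	u64 max_extent_size;
	u64 total_free_space;
	u64 found_offset;
	enum btrfs_extent_allocation_policy policy;
};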
@@ -4130,73 +4131,70 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
  * |- If not found, re-iterate all block groups
  */
 static noinline int find_free_extent(struct btrfs_root *root,
-				u64 ram_bytes, u64 num_bytes, u64 empty_size,
-				u64 hint_byte_orig, struct btrfs_key *ins,
-				u64 flags, int delalloc)
+				     struct btrfs_key *ins,
+				     struct find_free_extent_ctl *ffe_ctl)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 	int cache_block_group_error = 0;
 	struct btrfs_block_group *block_group = NULL;
-	struct find_free_extent_ctl ffe_ctl = {0};
 	struct btrfs_space_info *space_info;
 	bool full_search = false;
-	bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

-	WARN_ON(num_bytes < fs_info->sectorsize);
-
-	ffe_ctl.num_bytes = num_bytes;
-	ffe_ctl.empty_size = empty_size;
-	ffe_ctl.flags = flags;
-	ffe_ctl.search_start = 0;
-	ffe_ctl.delalloc = delalloc;
-	ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
-	ffe_ctl.have_caching_bg = false;
-	ffe_ctl.orig_have_caching_bg = false;
-	ffe_ctl.found_offset = 0;
-	ffe_ctl.hint_byte = hint_byte_orig;
-	ffe_ctl.for_treelog = for_treelog;
-	ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
+	WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize);

+	ffe_ctl->search_start = 0;
+	/* For clustered allocation */
+	ffe_ctl->empty_cluster = 0;
+	ffe_ctl->last_ptr = NULL;
+	ffe_ctl->use_cluster = true;
+	ffe_ctl->have_caching_bg = false;
+	ffe_ctl->orig_have_caching_bg = false;
+	ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
+	ffe_ctl->loop = 0;
 	/* For clustered allocation */
-	ffe_ctl.retry_clustered = false;
-	ffe_ctl.retry_unclustered = false;
-	ffe_ctl.last_ptr = NULL;
-	ffe_ctl.use_cluster = true;
+	ffe_ctl->retry_clustered = false;
+	ffe_ctl->retry_unclustered = false;
+	ffe_ctl->cached = 0;
+	ffe_ctl->max_extent_size = 0;
+	ffe_ctl->total_free_space = 0;
+	ffe_ctl->found_offset = 0;
+	ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED;

 	if (btrfs_is_zoned(fs_info))
-		ffe_ctl.policy = BTRFS_EXTENT_ALLOC_ZONED;
+		ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED;

 	ins->type = BTRFS_EXTENT_ITEM_KEY;
 	ins->objectid = 0;
 	ins->offset = 0;

-	trace_find_free_extent(root, num_bytes, empty_size, flags);
+	trace_find_free_extent(root, ffe_ctl->num_bytes, ffe_ctl->empty_size,
+			       ffe_ctl->flags);

-	space_info = btrfs_find_space_info(fs_info, flags);
+	space_info = btrfs_find_space_info(fs_info, ffe_ctl->flags);
 	if (!space_info) {
-		btrfs_err(fs_info, "No space info for %llu", flags);
+		btrfs_err(fs_info, "No space info for %llu", ffe_ctl->flags);
 		return -ENOSPC;
 	}

-	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
+	ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins);
 	if (ret < 0)
 		return ret;

-	ffe_ctl.search_start = max(ffe_ctl.search_start,
-				   first_logical_byte(fs_info, 0));
-	ffe_ctl.search_start = max(ffe_ctl.search_start, ffe_ctl.hint_byte);
-	if (ffe_ctl.search_start == ffe_ctl.hint_byte) {
+	ffe_ctl->search_start = max(ffe_ctl->search_start,
+				    first_logical_byte(fs_info, 0));
+	ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte);
+	if (ffe_ctl->search_start == ffe_ctl->hint_byte) {
 		block_group = btrfs_lookup_block_group(fs_info,
-						       ffe_ctl.search_start);
+						       ffe_ctl->search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
 		 *
 		 * However if we are re-searching with an ideal block group
 		 * picked out then we don't care that the block group is cached.
 		 */
-		if (block_group && block_group_bits(block_group, flags) &&
+		if (block_group && block_group_bits(block_group, ffe_ctl->flags) &&
 		    block_group->cached != BTRFS_CACHE_NO) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
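Note the ownership split the new layout creates: the caller fills the input fields once, and find_free_extent() re-initializes all of its scratch state on every entry. That is what keeps the retry loop in btrfs_reserve_extent() correct, since on ENOSPC it shrinks num_bytes toward min_alloc_size and calls the allocator again with the same ffe_ctl. A minimal stand-alone sketch of that pattern, with invented names and sizes:

#include <stdio.h>

/* Hypothetical context split into caller inputs and callee scratch state. */
struct alloc_ctl {
	unsigned long num_bytes;	/* input, set by the caller */
	unsigned long search_start;	/* scratch, owned by the callee */
	int loop;			/* scratch, owned by the callee */
};

static int do_search(struct alloc_ctl *ctl)
{
	/* Reset scratch state so a retry with the same ctl starts clean. */
	ctl->search_start = 0;
	ctl->loop = 0;
	/* Pretend anything larger than 4096 bytes cannot be satisfied. */
	return ctl->num_bytes > 4096 ? -1 : 0;
}

int main(void)
{
	struct alloc_ctl ctl = { .num_bytes = 16384 };

again:
	if (do_search(&ctl) < 0) {
		ctl.num_bytes /= 2;	/* shrink and retry, like btrfs_reserve_extent() */
		goto again;
	}
	printf("allocated %lu bytes\n", ctl.num_bytes);
	return 0;
}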
@@ -4210,41 +4208,42 @@ static noinline int find_free_extent(struct btrfs_root *root,
 				btrfs_put_block_group(block_group);
 				up_read(&space_info->groups_sem);
 			} else {
-				ffe_ctl.index = btrfs_bg_flags_to_raid_index(
-						block_group->flags);
-				btrfs_lock_block_group(block_group, delalloc);
+				ffe_ctl->index = btrfs_bg_flags_to_raid_index(
+						block_group->flags);
+				btrfs_lock_block_group(block_group,
+						       ffe_ctl->delalloc);
 				goto have_block_group;
 			}
 		} else if (block_group) {
 			btrfs_put_block_group(block_group);
 		}
 	}
 search:
-	ffe_ctl.have_caching_bg = false;
-	if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
-	    ffe_ctl.index == 0)
+	ffe_ctl->have_caching_bg = false;
+	if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(ffe_ctl->flags) ||
+	    ffe_ctl->index == 0)
 		full_search = true;
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group,
-			    &space_info->block_groups[ffe_ctl.index], list) {
+			    &space_info->block_groups[ffe_ctl->index], list) {
 		struct btrfs_block_group *bg_ret;

 		/* If the block group is read-only, we can skip it entirely. */
 		if (unlikely(block_group->ro)) {
-			if (for_treelog)
+			if (ffe_ctl->for_treelog)
 				btrfs_clear_treelog_bg(block_group);
 			continue;
 		}

-		btrfs_grab_block_group(block_group, delalloc);
-		ffe_ctl.search_start = block_group->start;
+		btrfs_grab_block_group(block_group, ffe_ctl->delalloc);
+		ffe_ctl->search_start = block_group->start;

 		/*
 		 * this can happen if we end up cycling through all the
 		 * raid types, but we want to make sure we only allocate
 		 * for the proper type.
 		 */
-		if (!block_group_bits(block_group, flags)) {
+		if (!block_group_bits(block_group, ffe_ctl->flags)) {
 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
 				BTRFS_BLOCK_GROUP_RAID1_MASK |
 				BTRFS_BLOCK_GROUP_RAID56_MASK |
@@ -4255,22 +4254,22 @@ static noinline int find_free_extent(struct btrfs_root *root,
 			 * doesn't provide them, bail. This does allow us to
 			 * fill raid0 from raid1.
 			 */
-			if ((flags & extra) && !(block_group->flags & extra))
+			if ((ffe_ctl->flags & extra) && !(block_group->flags & extra))
 				goto loop;

 			/*
 			 * This block group has different flags than we want.
 			 * It's possible that we have MIXED_GROUP flag but no
 			 * block group is mixed. Just skip such block group.
 			 */
-			btrfs_release_block_group(block_group, delalloc);
+			btrfs_release_block_group(block_group, ffe_ctl->delalloc);
 			continue;
 		}

 have_block_group:
-		ffe_ctl.cached = btrfs_block_group_done(block_group);
-		if (unlikely(!ffe_ctl.cached)) {
-			ffe_ctl.have_caching_bg = true;
+		ffe_ctl->cached = btrfs_block_group_done(block_group);
+		if (unlikely(!ffe_ctl->cached)) {
+			ffe_ctl->have_caching_bg = true;
 			ret = btrfs_cache_block_group(block_group, 0);

 			/*
@@ -4293,10 +4292,11 @@ static noinline int find_free_extent(struct btrfs_root *root,
 			goto loop;

 		bg_ret = NULL;
-		ret = do_allocation(block_group, &ffe_ctl, &bg_ret);
+		ret = do_allocation(block_group, ffe_ctl, &bg_ret);
 		if (ret == 0) {
 			if (bg_ret && bg_ret != block_group) {
-				btrfs_release_block_group(block_group, delalloc);
+				btrfs_release_block_group(block_group,
+							  ffe_ctl->delalloc);
 				block_group = bg_ret;
 			}
 		} else if (ret == -EAGAIN) {
@@ -4306,46 +4306,49 @@ static noinline int find_free_extent(struct btrfs_root *root,
 		}

 		/* Checks */
-		ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
-						fs_info->stripesize);
+		ffe_ctl->search_start = round_up(ffe_ctl->found_offset,
+						 fs_info->stripesize);

 		/* move on to the next group */
-		if (ffe_ctl.search_start + num_bytes >
+		if (ffe_ctl->search_start + ffe_ctl->num_bytes >
 		    block_group->start + block_group->length) {
 			btrfs_add_free_space_unused(block_group,
-					    ffe_ctl.found_offset, num_bytes);
+						    ffe_ctl->found_offset,
+						    ffe_ctl->num_bytes);
 			goto loop;
 		}

-		if (ffe_ctl.found_offset < ffe_ctl.search_start)
+		if (ffe_ctl->found_offset < ffe_ctl->search_start)
 			btrfs_add_free_space_unused(block_group,
-					ffe_ctl.found_offset,
-					ffe_ctl.search_start - ffe_ctl.found_offset);
+						    ffe_ctl->found_offset,
+						    ffe_ctl->search_start - ffe_ctl->found_offset);

-		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
-					       num_bytes, delalloc);
+		ret = btrfs_add_reserved_bytes(block_group, ffe_ctl->ram_bytes,
+					       ffe_ctl->num_bytes,
+					       ffe_ctl->delalloc);
 		if (ret == -EAGAIN) {
 			btrfs_add_free_space_unused(block_group,
-					    ffe_ctl.found_offset, num_bytes);
+						    ffe_ctl->found_offset,
+						    ffe_ctl->num_bytes);
 			goto loop;
 		}
 		btrfs_inc_block_group_reservations(block_group);

 		/* we are all good, lets return */
-		ins->objectid = ffe_ctl.search_start;
-		ins->offset = num_bytes;
+		ins->objectid = ffe_ctl->search_start;
+		ins->offset = ffe_ctl->num_bytes;

-		trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
-					   num_bytes);
-		btrfs_release_block_group(block_group, delalloc);
+		trace_btrfs_reserve_extent(block_group, ffe_ctl->search_start,
+					   ffe_ctl->num_bytes);
+		btrfs_release_block_group(block_group, ffe_ctl->delalloc);
 		break;
 loop:
-		release_block_group(block_group, &ffe_ctl, delalloc);
+		release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
 		cond_resched();
 	}
 	up_read(&space_info->groups_sem);

-	ret = find_free_extent_update_loop(fs_info, ins, &ffe_ctl, full_search);
+	ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search);
 	if (ret > 0)
 		goto search;
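In the checks above, found_offset is rounded up to the stripe size before it becomes search_start, and when alignment skips bytes (found_offset < search_start) the slack is handed back to the free space cache via btrfs_add_free_space_unused(). round_up() here is the usual kernel power-of-two alignment macro; a small worked example with invented values:

#include <stdio.h>

/* Simplified round_up(), equivalent to the kernel macro for
 * power-of-two alignments. */
#define round_up(x, y) (((x) + (y) - 1) & ~((unsigned long long)(y) - 1))

int main(void)
{
	unsigned long long found_offset = 1234567;	/* invented offset */
	unsigned long long stripesize = 65536;		/* invented stripe size */
	unsigned long long search_start = round_up(found_offset, stripesize);

	/* The gap below is what btrfs_add_free_space_unused() would hand
	 * back to the free space cache. */
	printf("search_start=%llu slack=%llu\n",
	       search_start, search_start - found_offset);
	return 0;
}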
@@ -4354,12 +4357,12 @@ static noinline int find_free_extent(struct btrfs_root *root,
 		 * Use ffe_ctl->total_free_space as fallback if we can't find
 		 * any contiguous hole.
 		 */
-		if (!ffe_ctl.max_extent_size)
-			ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
+		if (!ffe_ctl->max_extent_size)
+			ffe_ctl->max_extent_size = ffe_ctl->total_free_space;
 		spin_lock(&space_info->lock);
-		space_info->max_extent_size = ffe_ctl.max_extent_size;
+		space_info->max_extent_size = ffe_ctl->max_extent_size;
 		spin_unlock(&space_info->lock);
-		ins->offset = ffe_ctl.max_extent_size;
+		ins->offset = ffe_ctl->max_extent_size;
 	} else if (ret == -ENOSPC) {
 		ret = cache_block_group_error;
 	}
@@ -4417,6 +4420,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 			 struct btrfs_key *ins, int is_data, int delalloc)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct find_free_extent_ctl ffe_ctl = {};
 	bool final_tried = num_bytes == min_alloc_size;
 	u64 flags;
 	int ret;
@@ -4425,8 +4429,16 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 	flags = get_alloc_profile_by_root(root, is_data);
 again:
 	WARN_ON(num_bytes < fs_info->sectorsize);
-	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
-			       hint_byte, ins, flags, delalloc);
+
+	ffe_ctl.ram_bytes = ram_bytes;
+	ffe_ctl.num_bytes = num_bytes;
+	ffe_ctl.empty_size = empty_size;
+	ffe_ctl.flags = flags;
+	ffe_ctl.delalloc = delalloc;
+	ffe_ctl.hint_byte = hint_byte;
+	ffe_ctl.for_treelog = for_treelog;
+
+	ret = find_free_extent(root, ins, &ffe_ctl);
 	if (!ret && !is_data) {
 		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
 	} else if (ret == -ENOSPC) {
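The net effect on the calling convention: eight parameters collapse into three, so only root, ins and the context cross the function boundary, and helpers such as prepare_allocation(), do_allocation() and find_free_extent_update_loop() share the same state without long argument lists. The before and after shapes, abbreviated from the hunks above (the designated-initializer form is a stylistic condensation, not the literal patch text):

/* Before: every input is a separate argument. */
ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
		       hint_byte, ins, flags, delalloc);

/* After: inputs are packed once into the context, then passed by pointer. */
struct find_free_extent_ctl ffe_ctl = {
	.ram_bytes	= ram_bytes,
	.num_bytes	= num_bytes,
	.empty_size	= empty_size,
	.flags		= flags,
	.delalloc	= delalloc,
	.hint_byte	= hint_byte,
	.for_treelog	= for_treelog,
};

ret = find_free_extent(root, ins, &ffe_ctl);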