Skip to content

Commit 15c12fc

Browse files
Christoph Hellwig authored and kdave committed
btrfs: zoned: introduce a zone_info struct in btrfs_load_block_group_zone_info
Add a new zone_info structure to hold per-zone information in btrfs_load_block_group_zone_info and prepare for breaking out helpers from it. Reviewed-by: Johannes Thumshirn <[email protected]> Signed-off-by: Christoph Hellwig <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
1 parent 4d20c1d commit 15c12fc

File tree

1 file changed

+37
-47
lines changed

1 file changed

+37
-47
lines changed

fs/btrfs/zoned.c

Lines changed: 37 additions & 47 deletions
Original file line number · Diff line number · Diff line change
@@ -1282,6 +1282,12 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
12821282
return ret;
12831283
}
12841284

1285+
/*
 * Per-stripe zone state collected by btrfs_load_block_group_zone_info()
 * (one entry per stripe in the chunk map), replacing the three parallel
 * alloc_offsets[]/caps[]/physical[] arrays.
 */
struct zone_info {
1286+
u64 physical;	/* byte offset of the stripe on its device (map->stripes[i].physical) */
1287+
u64 capacity;	/* usable zone capacity in bytes (zone.capacity << SECTOR_SHIFT) */
1288+
u64 alloc_offset;	/* write-pointer offset in bytes, or a WP_* sentinel (WP_MISSING_DEV / WP_CONVENTIONAL) */
1289+
};
1290+
12851291
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
12861292
{
12871293
struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -1291,12 +1297,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
12911297
struct btrfs_device *device;
12921298
u64 logical = cache->start;
12931299
u64 length = cache->length;
1300+
struct zone_info *zone_info = NULL;
12941301
int ret;
12951302
int i;
12961303
unsigned int nofs_flag;
1297-
u64 *alloc_offsets = NULL;
1298-
u64 *caps = NULL;
1299-
u64 *physical = NULL;
13001304
unsigned long *active = NULL;
13011305
u64 last_alloc = 0;
13021306
u32 num_sequential = 0, num_conventional = 0;
@@ -1328,20 +1332,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
13281332
goto out;
13291333
}
13301334

1331-
alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
1332-
if (!alloc_offsets) {
1333-
ret = -ENOMEM;
1334-
goto out;
1335-
}
1336-
1337-
caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
1338-
if (!caps) {
1339-
ret = -ENOMEM;
1340-
goto out;
1341-
}
1342-
1343-
physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
1344-
if (!physical) {
1335+
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1336+
if (!zone_info) {
13451337
ret = -ENOMEM;
13461338
goto out;
13471339
}
@@ -1353,20 +1345,21 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
13531345
}
13541346

13551347
for (i = 0; i < map->num_stripes; i++) {
1348+
struct zone_info *info = &zone_info[i];
13561349
bool is_sequential;
13571350
struct blk_zone zone;
13581351
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
13591352
int dev_replace_is_ongoing = 0;
13601353

13611354
device = map->stripes[i].dev;
1362-
physical[i] = map->stripes[i].physical;
1355+
info->physical = map->stripes[i].physical;
13631356

13641357
if (device->bdev == NULL) {
1365-
alloc_offsets[i] = WP_MISSING_DEV;
1358+
info->alloc_offset = WP_MISSING_DEV;
13661359
continue;
13671360
}
13681361

1369-
is_sequential = btrfs_dev_is_sequential(device, physical[i]);
1362+
is_sequential = btrfs_dev_is_sequential(device, info->physical);
13701363
if (is_sequential)
13711364
num_sequential++;
13721365
else
@@ -1380,33 +1373,33 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
13801373
__set_bit(i, active);
13811374

13821375
if (!is_sequential) {
1383-
alloc_offsets[i] = WP_CONVENTIONAL;
1376+
info->alloc_offset = WP_CONVENTIONAL;
13841377
continue;
13851378
}
13861379

13871380
/*
13881381
* This zone will be used for allocation, so mark this zone
13891382
* non-empty.
13901383
*/
1391-
btrfs_dev_clear_zone_empty(device, physical[i]);
1384+
btrfs_dev_clear_zone_empty(device, info->physical);
13921385

13931386
down_read(&dev_replace->rwsem);
13941387
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
13951388
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
1396-
btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
1389+
btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
13971390
up_read(&dev_replace->rwsem);
13981391

13991392
/*
14001393
* The group is mapped to a sequential zone. Get the zone write
14011394
* pointer to determine the allocation offset within the zone.
14021395
*/
1403-
WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
1396+
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
14041397
nofs_flag = memalloc_nofs_save();
1405-
ret = btrfs_get_dev_zone(device, physical[i], &zone);
1398+
ret = btrfs_get_dev_zone(device, info->physical, &zone);
14061399
memalloc_nofs_restore(nofs_flag);
14071400
if (ret == -EIO || ret == -EOPNOTSUPP) {
14081401
ret = 0;
1409-
alloc_offsets[i] = WP_MISSING_DEV;
1402+
info->alloc_offset = WP_MISSING_DEV;
14101403
continue;
14111404
} else if (ret) {
14121405
goto out;
@@ -1421,27 +1414,26 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
14211414
goto out;
14221415
}
14231416

1424-
caps[i] = (zone.capacity << SECTOR_SHIFT);
1417+
info->capacity = (zone.capacity << SECTOR_SHIFT);
14251418

14261419
switch (zone.cond) {
14271420
case BLK_ZONE_COND_OFFLINE:
14281421
case BLK_ZONE_COND_READONLY:
14291422
btrfs_err(fs_info,
14301423
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
1431-
physical[i] >> device->zone_info->zone_size_shift,
1424+
info->physical >> device->zone_info->zone_size_shift,
14321425
rcu_str_deref(device->name), device->devid);
1433-
alloc_offsets[i] = WP_MISSING_DEV;
1426+
info->alloc_offset = WP_MISSING_DEV;
14341427
break;
14351428
case BLK_ZONE_COND_EMPTY:
1436-
alloc_offsets[i] = 0;
1429+
info->alloc_offset = 0;
14371430
break;
14381431
case BLK_ZONE_COND_FULL:
1439-
alloc_offsets[i] = caps[i];
1432+
info->alloc_offset = info->capacity;
14401433
break;
14411434
default:
14421435
/* Partially used zone */
1443-
alloc_offsets[i] =
1444-
((zone.wp - zone.start) << SECTOR_SHIFT);
1436+
info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
14451437
__set_bit(i, active);
14461438
break;
14471439
}
@@ -1468,15 +1460,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
14681460

14691461
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
14701462
case 0: /* single */
1471-
if (alloc_offsets[0] == WP_MISSING_DEV) {
1463+
if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
14721464
btrfs_err(fs_info,
14731465
"zoned: cannot recover write pointer for zone %llu",
1474-
physical[0]);
1466+
zone_info[0].physical);
14751467
ret = -EIO;
14761468
goto out;
14771469
}
1478-
cache->alloc_offset = alloc_offsets[0];
1479-
cache->zone_capacity = caps[0];
1470+
cache->alloc_offset = zone_info[0].alloc_offset;
1471+
cache->zone_capacity = zone_info[0].capacity;
14801472
if (test_bit(0, active))
14811473
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
14821474
break;
@@ -1486,21 +1478,21 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
14861478
ret = -EINVAL;
14871479
goto out;
14881480
}
1489-
if (alloc_offsets[0] == WP_MISSING_DEV) {
1481+
if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
14901482
btrfs_err(fs_info,
14911483
"zoned: cannot recover write pointer for zone %llu",
1492-
physical[0]);
1484+
zone_info[0].physical);
14931485
ret = -EIO;
14941486
goto out;
14951487
}
1496-
if (alloc_offsets[1] == WP_MISSING_DEV) {
1488+
if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
14971489
btrfs_err(fs_info,
14981490
"zoned: cannot recover write pointer for zone %llu",
1499-
physical[1]);
1491+
zone_info[1].physical);
15001492
ret = -EIO;
15011493
goto out;
15021494
}
1503-
if (alloc_offsets[0] != alloc_offsets[1]) {
1495+
if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
15041496
btrfs_err(fs_info,
15051497
"zoned: write pointer offset mismatch of zones in DUP profile");
15061498
ret = -EIO;
@@ -1516,8 +1508,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
15161508
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
15171509
&cache->runtime_flags);
15181510
}
1519-
cache->alloc_offset = alloc_offsets[0];
1520-
cache->zone_capacity = min(caps[0], caps[1]);
1511+
cache->alloc_offset = zone_info[0].alloc_offset;
1512+
cache->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
15211513
break;
15221514
case BTRFS_BLOCK_GROUP_RAID1:
15231515
case BTRFS_BLOCK_GROUP_RAID0:
@@ -1570,9 +1562,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
15701562
cache->physical_map = NULL;
15711563
}
15721564
bitmap_free(active);
1573-
kfree(physical);
1574-
kfree(caps);
1575-
kfree(alloc_offsets);
1565+
kfree(zone_info);
15761566
free_extent_map(em);
15771567

15781568
return ret;

0 commit comments

Comments
 (0)