@@ -1282,6 +1282,12 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
 	return ret;
 }
 
+struct zone_info {
+	u64 physical;
+	u64 capacity;
+	u64 alloc_offset;
+};
+
 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -1291,12 +1297,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	struct btrfs_device *device;
 	u64 logical = cache->start;
 	u64 length = cache->length;
+	struct zone_info *zone_info = NULL;
 	int ret;
 	int i;
 	unsigned int nofs_flag;
-	u64 *alloc_offsets = NULL;
-	u64 *caps = NULL;
-	u64 *physical = NULL;
 	unsigned long *active = NULL;
 	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
@@ -1328,20 +1332,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		goto out;
 	}
 
-	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
-	if (!alloc_offsets) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
-	if (!caps) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
-	if (!physical) {
+	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
+	if (!zone_info) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1353,20 +1345,21 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	}
 
 	for (i = 0; i < map->num_stripes; i++) {
+		struct zone_info *info = &zone_info[i];
 		bool is_sequential;
 		struct blk_zone zone;
 		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
 		int dev_replace_is_ongoing = 0;
 
 		device = map->stripes[i].dev;
-		physical[i] = map->stripes[i].physical;
+		info->physical = map->stripes[i].physical;
 
 		if (device->bdev == NULL) {
-			alloc_offsets[i] = WP_MISSING_DEV;
+			info->alloc_offset = WP_MISSING_DEV;
 			continue;
 		}
 
-		is_sequential = btrfs_dev_is_sequential(device, physical[i]);
+		is_sequential = btrfs_dev_is_sequential(device, info->physical);
 		if (is_sequential)
 			num_sequential++;
 		else
@@ -1380,33 +1373,33 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			__set_bit(i, active);
 
 		if (!is_sequential) {
-			alloc_offsets[i] = WP_CONVENTIONAL;
+			info->alloc_offset = WP_CONVENTIONAL;
 			continue;
 		}
 
 		/*
 		 * This zone will be used for allocation, so mark this zone
 		 * non-empty.
 		 */
-		btrfs_dev_clear_zone_empty(device, physical[i]);
+		btrfs_dev_clear_zone_empty(device, info->physical);
 
 		down_read(&dev_replace->rwsem);
 		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
 		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
-			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
+			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
 		up_read(&dev_replace->rwsem);
 
 		/*
 		 * The group is mapped to a sequential zone. Get the zone write
 		 * pointer to determine the allocation offset within the zone.
 		 */
-		WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
+		WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
 		nofs_flag = memalloc_nofs_save();
-		ret = btrfs_get_dev_zone(device, physical[i], &zone);
+		ret = btrfs_get_dev_zone(device, info->physical, &zone);
 		memalloc_nofs_restore(nofs_flag);
 		if (ret == -EIO || ret == -EOPNOTSUPP) {
 			ret = 0;
-			alloc_offsets[i] = WP_MISSING_DEV;
+			info->alloc_offset = WP_MISSING_DEV;
 			continue;
 		} else if (ret) {
 			goto out;
@@ -1421,27 +1414,26 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			goto out;
 		}
 
-		caps[i] = (zone.capacity << SECTOR_SHIFT);
+		info->capacity = (zone.capacity << SECTOR_SHIFT);
 
 		switch (zone.cond) {
 		case BLK_ZONE_COND_OFFLINE:
 		case BLK_ZONE_COND_READONLY:
 			btrfs_err(fs_info,
 		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
-				  physical[i] >> device->zone_info->zone_size_shift,
+				  info->physical >> device->zone_info->zone_size_shift,
 				  rcu_str_deref(device->name), device->devid);
-			alloc_offsets[i] = WP_MISSING_DEV;
+			info->alloc_offset = WP_MISSING_DEV;
 			break;
 		case BLK_ZONE_COND_EMPTY:
-			alloc_offsets[i] = 0;
+			info->alloc_offset = 0;
 			break;
 		case BLK_ZONE_COND_FULL:
-			alloc_offsets[i] = caps[i];
+			info->alloc_offset = info->capacity;
 			break;
 		default:
 			/* Partially used zone */
-			alloc_offsets[i] =
-				((zone.wp - zone.start) << SECTOR_SHIFT);
+			info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
 			__set_bit(i, active);
 			break;
 		}
@@ -1468,15 +1460,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 
 	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
 	case 0: /* single */
-		if (alloc_offsets[0] == WP_MISSING_DEV) {
+		if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
 			btrfs_err(fs_info,
 			"zoned: cannot recover write pointer for zone %llu",
-				  physical[0]);
+				  zone_info[0].physical);
 			ret = -EIO;
 			goto out;
 		}
-		cache->alloc_offset = alloc_offsets[0];
-		cache->zone_capacity = caps[0];
+		cache->alloc_offset = zone_info[0].alloc_offset;
+		cache->zone_capacity = zone_info[0].capacity;
 		if (test_bit(0, active))
 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
 		break;
@@ -1486,21 +1478,21 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			ret = -EINVAL;
 			goto out;
 		}
-		if (alloc_offsets[0] == WP_MISSING_DEV) {
+		if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
 			btrfs_err(fs_info,
 			"zoned: cannot recover write pointer for zone %llu",
-				  physical[0]);
+				  zone_info[0].physical);
 			ret = -EIO;
 			goto out;
 		}
-		if (alloc_offsets[1] == WP_MISSING_DEV) {
+		if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
 			btrfs_err(fs_info,
 			"zoned: cannot recover write pointer for zone %llu",
-				  physical[1]);
+				  zone_info[1].physical);
 			ret = -EIO;
 			goto out;
 		}
-		if (alloc_offsets[0] != alloc_offsets[1]) {
+		if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
 			btrfs_err(fs_info,
 			"zoned: write pointer offset mismatch of zones in DUP profile");
 			ret = -EIO;
@@ -1516,8 +1508,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
 				&cache->runtime_flags);
 		}
-		cache->alloc_offset = alloc_offsets[0];
-		cache->zone_capacity = min(caps[0], caps[1]);
+		cache->alloc_offset = zone_info[0].alloc_offset;
+		cache->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
 		break;
 	case BTRFS_BLOCK_GROUP_RAID1:
 	case BTRFS_BLOCK_GROUP_RAID0:
@@ -1570,9 +1562,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		cache->physical_map = NULL;
 	}
 	bitmap_free(active);
-	kfree(physical);
-	kfree(caps);
-	kfree(alloc_offsets);
+	kfree(zone_info);
 	free_extent_map(em);
 
 	return ret;
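
The pattern behind this diff: three parallel u64 arrays (alloc_offsets, caps, physical), each with its own kcalloc() and error check, collapse into a single array of struct zone_info, so one allocation, one NULL check, and one kfree() in the unwind path replace three of each, and the per-stripe state travels behind one pointer. Below is a minimal userspace sketch of that consolidation, not kernel code: calloc()/free() stand in for kcalloc(..., GFP_NOFS)/kfree(), and num_stripes with its illustrative values stands in for the btrfs map_lookup data.

/* Sketch: array-of-structs consolidation, userspace stand-in. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t u64;

struct zone_info {
	u64 physical;
	u64 capacity;
	u64 alloc_offset;
};

int main(void)
{
	int num_stripes = 2;	/* e.g. a DUP block group */
	struct zone_info *zone_info;
	int i;

	/* One allocation and one error check instead of three. */
	zone_info = calloc(num_stripes, sizeof(*zone_info));
	if (!zone_info)
		return 1;	/* stands in for ret = -ENOMEM; goto out; */

	for (i = 0; i < num_stripes; i++) {
		/* All per-stripe fields live behind one pointer,
		 * mirroring "struct zone_info *info = &zone_info[i];". */
		struct zone_info *info = &zone_info[i];

		info->physical = (u64)i * 256 * 1024 * 1024;
		info->capacity = 256 * 1024 * 1024;
		info->alloc_offset = 0;	/* the BLK_ZONE_COND_EMPTY case */
	}

	printf("stripe 0 alloc_offset: %llu\n",
	       (unsigned long long)zone_info[0].alloc_offset);

	/* One free in the unwind path instead of three. */
	free(zone_info);
	return 0;
}

Keeping the fields together also removes the risk of the three arrays drifting out of step (for example, freeing or sizing one but not the others), which is why the later hunks can compare zone_info[0].alloc_offset against zone_info[1].alloc_offset directly instead of indexing separate arrays.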