@@ -1288,19 +1288,103 @@ struct zone_info {
 	u64 alloc_offset;
 };
 
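+/*
+ * Load the zone allocation state of stripe @zone_idx of @map into @info, and
+ * set the @zone_idx bit in @active if the zone can be considered active.
+ * Missing devices and conventional zones are not errors: they are reported
+ * via the special alloc_offset values WP_MISSING_DEV and WP_CONVENTIONAL.
+ * Returns 0 on success or a negative errno on failure.
+ */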
+static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+				struct zone_info *info, unsigned long *active,
+				struct map_lookup *map)
+{
+	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+	struct btrfs_device *device = map->stripes[zone_idx].dev;
+	int dev_replace_is_ongoing = 0;
+	unsigned int nofs_flag;
+	struct blk_zone zone;
+	int ret;
+
+	info->physical = map->stripes[zone_idx].physical;
+
+	if (!device->bdev) {
+		info->alloc_offset = WP_MISSING_DEV;
+		return 0;
+	}
+
+	/* Consider a zone as active if we can allow any number of active zones. */
+	if (!device->zone_info->max_active_zones)
+		__set_bit(zone_idx, active);
+
+	if (!btrfs_dev_is_sequential(device, info->physical)) {
+		info->alloc_offset = WP_CONVENTIONAL;
+		return 0;
+	}
+
+	/* This zone will be used for allocation, so mark this zone non-empty. */
+	btrfs_dev_clear_zone_empty(device, info->physical);
+
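+	/*
+	 * A running dev-replace duplicates writes to the target device, so
+	 * the matching zone on the target will stop being empty as well.
+	 */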
+	down_read(&dev_replace->rwsem);
+	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
+	up_read(&dev_replace->rwsem);
+
+	/*
+	 * The group is mapped to a sequential zone. Get the zone write pointer
+	 * to determine the allocation offset within the zone.
+	 */
+	WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+	nofs_flag = memalloc_nofs_save();
+	ret = btrfs_get_dev_zone(device, info->physical, &zone);
+	memalloc_nofs_restore(nofs_flag);
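+	/*
+	 * -EIO and -EOPNOTSUPP from the zone report degrade to
+	 * WP_MISSING_DEV instead of failing the block group load;
+	 * any other error is propagated to the caller.
+	 */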
+	if (ret) {
+		if (ret != -EIO && ret != -EOPNOTSUPP)
+			return ret;
+		info->alloc_offset = WP_MISSING_DEV;
+		return 0;
+	}
+
+	if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+		btrfs_err_in_rcu(fs_info,
+		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+			zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
+			device->devid);
+		return -EIO;
+	}
+
+	info->capacity = (zone.capacity << SECTOR_SHIFT);
+
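+	/*
+	 * Map the zone condition to an allocation offset: empty and full
+	 * zones pin the offset to the zone boundaries, a partially written
+	 * zone exposes its write pointer, and offline/read-only zones are
+	 * treated like missing devices.
+	 */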
+	switch (zone.cond) {
+	case BLK_ZONE_COND_OFFLINE:
+	case BLK_ZONE_COND_READONLY:
+		btrfs_err(fs_info,
+		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
+			  (info->physical >> device->zone_info->zone_size_shift),
+			  rcu_str_deref(device->name), device->devid);
+		info->alloc_offset = WP_MISSING_DEV;
+		break;
+	case BLK_ZONE_COND_EMPTY:
+		info->alloc_offset = 0;
+		break;
+	case BLK_ZONE_COND_FULL:
+		info->alloc_offset = info->capacity;
+		break;
+	default:
+		/* Partially used zone. */
+		info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
+		__set_bit(zone_idx, active);
+		break;
+	}
+
+	return 0;
+}
+
 int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
 	struct extent_map *em;
 	struct map_lookup *map;
-	struct btrfs_device *device;
 	u64 logical = cache->start;
 	u64 length = cache->length;
 	struct zone_info *zone_info = NULL;
 	int ret;
 	int i;
-	unsigned int nofs_flag;
 	unsigned long *active = NULL;
 	u64 last_alloc = 0;
 	u32 num_sequential = 0, num_conventional = 0;
@@ -1345,98 +1429,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	}
 
 	for (i = 0; i < map->num_stripes; i++) {
-		struct zone_info *info = &zone_info[i];
-		bool is_sequential;
-		struct blk_zone zone;
-		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
-		int dev_replace_is_ongoing = 0;
-
-		device = map->stripes[i].dev;
-		info->physical = map->stripes[i].physical;
-
-		if (device->bdev == NULL) {
-			info->alloc_offset = WP_MISSING_DEV;
-			continue;
-		}
-
-		is_sequential = btrfs_dev_is_sequential(device, info->physical);
-		if (is_sequential)
-			num_sequential++;
-		else
-			num_conventional++;
-
-		/*
-		 * Consider a zone as active if we can allow any number of
-		 * active zones.
-		 */
-		if (!device->zone_info->max_active_zones)
-			__set_bit(i, active);
-
-		if (!is_sequential) {
-			info->alloc_offset = WP_CONVENTIONAL;
-			continue;
-		}
-
-		/*
-		 * This zone will be used for allocation, so mark this zone
-		 * non-empty.
-		 */
-		btrfs_dev_clear_zone_empty(device, info->physical);
-
-		down_read(&dev_replace->rwsem);
-		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
-		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
-			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
-		up_read(&dev_replace->rwsem);
-
-		/*
-		 * The group is mapped to a sequential zone. Get the zone write
-		 * pointer to determine the allocation offset within the zone.
-		 */
-		WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
-		nofs_flag = memalloc_nofs_save();
-		ret = btrfs_get_dev_zone(device, info->physical, &zone);
-		memalloc_nofs_restore(nofs_flag);
-		if (ret == -EIO || ret == -EOPNOTSUPP) {
-			ret = 0;
-			info->alloc_offset = WP_MISSING_DEV;
-			continue;
-		} else if (ret) {
-			goto out;
-		}
-
-		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
-			btrfs_err_in_rcu(fs_info,
-		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
-				zone.start << SECTOR_SHIFT,
-				rcu_str_deref(device->name), device->devid);
-			ret = -EIO;
+		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+		if (ret)
 			goto out;
-		}
-
-		info->capacity = (zone.capacity << SECTOR_SHIFT);
 
-		switch (zone.cond) {
-		case BLK_ZONE_COND_OFFLINE:
-		case BLK_ZONE_COND_READONLY:
-			btrfs_err(fs_info,
-		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
-				  info->physical >> device->zone_info->zone_size_shift,
-				  rcu_str_deref(device->name), device->devid);
-			info->alloc_offset = WP_MISSING_DEV;
-			break;
-		case BLK_ZONE_COND_EMPTY:
-			info->alloc_offset = 0;
-			break;
-		case BLK_ZONE_COND_FULL:
-			info->alloc_offset = info->capacity;
-			break;
-		default:
-			/* Partially used zone */
-			info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
-			__set_bit(i, active);
-			break;
-		}
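+		/*
+		 * Stripes placed on conventional zones were flagged with
+		 * WP_CONVENTIONAL by btrfs_load_zone_info() above.
+		 */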
+		if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+			num_conventional++;
+		else
+			num_sequential++;
 	}
 
 	if (num_sequential > 0)