@@ -1119,10 +1119,7 @@ static int flush_dirty_cache(struct inode *inode)
 }
 
 static void noinline_for_stack
-cleanup_write_cache_enospc(struct inode *inode,
-			   struct btrfs_io_ctl *io_ctl,
-			   struct extent_state **cached_state,
-			   struct list_head *bitmap_list)
+cleanup_bitmap_list(struct list_head *bitmap_list)
 {
 	struct list_head *pos, *n;
 
@@ -1131,6 +1128,14 @@ cleanup_write_cache_enospc(struct inode *inode,
 			list_entry(pos, struct btrfs_free_space, list);
 		list_del_init(&entry->list);
 	}
+}
+
+static void noinline_for_stack
+cleanup_write_cache_enospc(struct inode *inode,
+			   struct btrfs_io_ctl *io_ctl,
+			   struct extent_state **cached_state,
+			   struct list_head *bitmap_list)
+{
 	io_ctl_drop_pages(io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, cached_state,
@@ -1149,7 +1154,8 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
 	if (!inode)
 		return 0;
 
-	root = root->fs_info->tree_root;
+	if (block_group)
+		root = root->fs_info->tree_root;
 
 	/* Flush the dirty pages in the cache file. */
 	ret = flush_dirty_cache(inode);
@@ -1265,11 +1271,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	ret = write_cache_extent_entries(io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
-	spin_unlock(&ctl->tree_lock);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * Some spaces that are freed in the current transaction are pinned,
@@ -1280,17 +1283,14 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * the dirty list and redo it.  No locking needed
 	 */
 	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
-	if (ret) {
-		mutex_unlock(&ctl->cache_writeout_mutex);
-		goto out_nospc;
-	}
+	if (ret)
+		goto out_nospc_locked;
 
 	/*
 	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
 	 * locked while doing it because a concurrent trim can be manipulating
 	 * or freeing the bitmap.
 	 */
-	spin_lock(&ctl->tree_lock);
 	ret = write_bitmap_entries(io_ctl, &bitmap_list);
 	spin_unlock(&ctl->tree_lock);
 	mutex_unlock(&ctl->cache_writeout_mutex);
@@ -1343,6 +1343,11 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	iput(inode);
 	return ret;
 
+out_nospc_locked:
+	cleanup_bitmap_list(&bitmap_list);
+	spin_unlock(&ctl->tree_lock);
+	mutex_unlock(&ctl->cache_writeout_mutex);
+
 out_nospc:
 	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
 
@@ -3463,9 +3468,12 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
 		return 0;
 
+	memset(&io_ctl, 0, sizeof(io_ctl));
 	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
-				      trans, path, 0) ||
-	      btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+				      trans, path, 0);
+	if (!ret)
+		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+
 	if (ret) {
 		btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
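
Note on the error-path change in __btrfs_write_out_cache(): the hunks above route failures that occur while ctl->tree_lock and cache_writeout_mutex are still held to a new out_nospc_locked label, which empties the bitmap list and drops the locks before falling through to the existing out_nospc cleanup. The program below is a minimal, self-contained sketch of that layered goto-cleanup pattern using plain pthread mutexes; the helpers do_step() and write_out_cache() are made up for illustration and this is not btrfs code.

/*
 * Sketch of the layered goto-cleanup pattern: errors raised while the lock
 * is still held jump to a "locked" label that undoes lock-protected state
 * and releases the lock, then fall through to the common unlocked cleanup.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for one write-out step; returns -1 on simulated failure. */
static int do_step(int fail)
{
	return fail ? -1 : 0;
}

static int write_out_cache(int fail_early, int fail_late)
{
	int ret;

	pthread_mutex_lock(&tree_lock);

	ret = do_step(fail_early);	/* e.g. writing the extent entries */
	if (ret)
		goto out_locked;	/* failed while still holding the lock */

	ret = do_step(fail_late);	/* e.g. writing the bitmap entries */
	pthread_mutex_unlock(&tree_lock);
	if (ret)
		goto out;

	return 0;

out_locked:
	/* undo state built up under the lock, then release it ... */
	pthread_mutex_unlock(&tree_lock);
	/* ... and fall through to the common, unlocked cleanup */
out:
	fprintf(stderr, "write-out failed: %d\n", ret);
	return ret;
}

int main(void)
{
	/* Exercise the locked error path, the unlocked one, then success. */
	write_out_cache(1, 0);
	write_out_cache(0, 1);
	return write_out_cache(0, 0);
}

The point of the two labels, here as in the patch, is that every failure site can use a single goto without first repeating the unlock calls inline, while the lock is never released twice and never leaked.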