@@ -1404,6 +1404,47 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1404
1404
1405
1405
}
1406
1406
1407
/*
 * Walk the range [start, start + len) of an inode and mark every hole
 * (range not yet covered by an on-disk or delalloc extent) with the
 * EXTENT_DELALLOC_NEW bit in the inode's io tree.
 *
 * This lets later code distinguish delalloc created by the current write
 * from pre-existing delalloc, which matters for i_size/disk_i_size
 * accounting on writes into preallocated extents.
 *
 * @inode:        the btrfs inode being written to
 * @start:        start offset of the range, in bytes
 * @len:          length of the range, in bytes
 * @cached_state: cached extent state passed through to set_extent_bit()
 *                to speed up io tree lookups
 *
 * Returns 0 on success or a negative errno on failure (from extent map
 * lookup or from setting the bit in the io tree).
 */
static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, 0, search_start,
				      search_len, 0);
		if (IS_ERR(em))
			return PTR_ERR(em);

		/* Only holes need the bit; real/delalloc extents are skipped. */
		if (em->block_start != EXTENT_MAP_HOLE)
			goto next;

		/*
		 * Clamp the extent map to the portion that actually overlaps
		 * [search_start, end]: the returned map may begin before
		 * search_start and/or extend past the requested length.
		 */
		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW,
				     NULL, cached_state, GFP_NOFS);
next:
		/* Advance past this extent map before checking for errors, */
		/* so the map reference is always dropped exactly once.     */
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
1447
+
1407
1448
/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
@@ -1432,8 +1473,11 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1432
1473
+ round_up (pos + write_bytes - start_pos ,
1433
1474
fs_info -> sectorsize ) - 1 ;
1434
1475
1435
- if (start_pos < inode -> vfs_inode .i_size ) {
1476
+ if (start_pos < inode -> vfs_inode .i_size ||
1477
+ (inode -> flags & BTRFS_INODE_PREALLOC )) {
1436
1478
struct btrfs_ordered_extent * ordered ;
1479
+ unsigned int clear_bits ;
1480
+
1437
1481
lock_extent_bits (& inode -> io_tree , start_pos , last_pos ,
1438
1482
cached_state );
1439
1483
ordered = btrfs_lookup_ordered_range (inode , start_pos ,
@@ -1454,11 +1498,19 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
1454
1498
}
1455
1499
if (ordered )
1456
1500
btrfs_put_ordered_extent (ordered );
1457
-
1501
+ ret = btrfs_find_new_delalloc_bytes (inode , start_pos ,
1502
+ last_pos - start_pos + 1 ,
1503
+ cached_state );
1504
+ clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
1505
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG ;
1506
+ if (ret )
1507
+ clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED ;
1458
1508
clear_extent_bit (& inode -> io_tree , start_pos ,
1459
- last_pos , EXTENT_DIRTY | EXTENT_DELALLOC |
1460
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG ,
1461
- 0 , 0 , cached_state , GFP_NOFS );
1509
+ last_pos , clear_bits ,
1510
+ (clear_bits & EXTENT_LOCKED ) ? 1 : 0 ,
1511
+ 0 , cached_state , GFP_NOFS );
1512
+ if (ret )
1513
+ return ret ;
1462
1514
* lockstart = start_pos ;
1463
1515
* lockend = last_pos ;
1464
1516
ret = 1 ;
@@ -2848,8 +2900,10 @@ static long btrfs_fallocate(struct file *file, int mode,
2848
2900
}
2849
2901
ret = btrfs_qgroup_reserve_data (inode , cur_offset ,
2850
2902
last_byte - cur_offset );
2851
- if (ret < 0 )
2903
+ if (ret < 0 ) {
2904
+ free_extent_map (em );
2852
2905
break ;
2906
+ }
2853
2907
} else {
2854
2908
/*
2855
2909
* Do not need to reserve unwritten extent for this
0 commit comments