@@ -720,6 +720,13 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	}
 
 	spin_lock(&tree->lock);
+	if (cached_state && *cached_state) {
+		state = *cached_state;
+		if (state->start == start && state->tree) {
+			node = &state->rb_node;
+			goto hit_next;
+		}
+	}
 	/*
 	 * this search will find all the extents that end after
 	 * our range starts.
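The fast path added above lets set_extent_bit() skip the rbtree walk entirely when the caller's cached extent_state is still linked into a tree and still starts at the requested offset. Below is a standalone toy sketch of that hint-before-search pattern; range_node, range_tree, find_node and lookup are hypothetical names for illustration only, not the kernel's API.

#include <stdio.h>
#include <stddef.h>

/* Toy model: a cached node that short-circuits a tree search when the
 * caller's hint still points at the right range. */
struct range_node {
	unsigned long long start;	/* first byte covered by this node */
	struct range_tree *tree;	/* non-NULL while linked into a tree */
};

struct range_tree {
	struct range_node *nodes;
	int nr_nodes;
	unsigned long long searches;	/* full searches performed, for demo */
};

/* slow path: stand-in for the rbtree walk */
static struct range_node *find_node(struct range_tree *tree,
				    unsigned long long start)
{
	tree->searches++;
	for (int i = 0; i < tree->nr_nodes; i++)
		if (tree->nodes[i].start == start)
			return &tree->nodes[i];
	return NULL;
}

/* fast path mirrors the hunk: trust the hint only if it is still
 * linked (tree non-NULL) and starts exactly at 'start' */
static struct range_node *lookup(struct range_tree *tree,
				 unsigned long long start,
				 struct range_node *cached)
{
	if (cached && cached->tree && cached->start == start)
		return cached;
	return find_node(tree, start);
}

int main(void)
{
	struct range_node nodes[] = { { 0, NULL }, { 4096, NULL } };
	struct range_tree tree = { nodes, 2, 0 };
	nodes[0].tree = &tree;
	nodes[1].tree = &tree;

	struct range_node *hint = lookup(&tree, 4096, NULL);
	lookup(&tree, 4096, hint);		/* hint hit: no search */
	printf("full searches: %llu\n", tree.searches);	/* prints 1 */
	return 0;
}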
@@ -1286,6 +1293,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	u64 delalloc_start;
 	u64 delalloc_end;
 	u64 found;
+	struct extent_state *cached_state = NULL;
 	int ret;
 	int loops = 0;
 
@@ -1323,6 +1331,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 		/* some of the pages are gone, lets avoid looping by
 		 * shortening the size of the delalloc range we're searching
 		 */
+		free_extent_state(cached_state);
 		if (!loops) {
 			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
 			max_bytes = PAGE_CACHE_SIZE - offset;
@@ -1336,18 +1345,21 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	BUG_ON(ret);
 
 	/* step three, lock the state bits for the whole range */
-	lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+	lock_extent_bits(tree, delalloc_start, delalloc_end,
+			 0, &cached_state, GFP_NOFS);
 
 	/* then test to make sure it is all still delalloc */
 	ret = test_range_bit(tree, delalloc_start, delalloc_end,
-			     EXTENT_DELALLOC, 1);
+			     EXTENT_DELALLOC, 1, cached_state);
 	if (!ret) {
-		unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+		unlock_extent_cached(tree, delalloc_start, delalloc_end,
+				     &cached_state, GFP_NOFS);
 		__unlock_for_delalloc(inode, locked_page,
 			      delalloc_start, delalloc_end);
 		cond_resched();
 		goto again;
 	}
+	free_extent_state(cached_state);
 	*start = delalloc_start;
 	*end = delalloc_end;
 out_failed:
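The caller-side lifecycle is the point of this hunk: lock_extent_bits() stores a referenced extent_state in cached_state, test_range_bit() merely borrows that pointer, and every exit path must drop the reference, via unlock_extent_cached() on the retry path or the new free_extent_state() call on success. A minimal refcount sketch of that contract, assuming hypothetical state_get()/state_put() helpers that stand in for the kernel's reference handling:

#include <assert.h>
#include <stdlib.h>

/* Illustrative refcounted object; not the kernel's extent_state. */
struct cached_ref {
	int refs;
};

static struct cached_ref *state_get(void)
{
	struct cached_ref *s = calloc(1, sizeof(*s));
	if (s)
		s->refs = 1;	/* reference owned by the caller's cache */
	return s;
}

static void state_put(struct cached_ref **cached)
{
	if (*cached && --(*cached)->refs == 0)
		free(*cached);
	*cached = NULL;		/* like free_extent_state(): drop the hint */
}

int main(void)
{
	struct cached_ref *cached = NULL;

	cached = state_get();	/* "lock_extent_bits": cache now holds a ref */
	/* "test_range_bit" would only borrow the pointer here */
	state_put(&cached);	/* success path still drops the cached ref */
	assert(cached == NULL);
	return 0;
}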
@@ -1530,14 +1542,17 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
 * range is found set.
 */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled)
+		   int bits, int filled, struct extent_state *cached)
 {
 	struct extent_state *state = NULL;
 	struct rb_node *node;
 	int bitset = 0;
 
 	spin_lock(&tree->lock);
-	node = tree_search(tree, start);
+	if (cached && cached->tree && cached->start == start)
+		node = &cached->rb_node;
+	else
+		node = tree_search(tree, start);
 	while (node && start <= end) {
 		state = rb_entry(node, struct extent_state, rb_node);
 
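Before trusting the hint, test_range_bit() re-validates it: the state must still be linked into a tree (cached->tree non-NULL) and must begin exactly at start, otherwise it falls back to tree_search(). Callers with no hint simply pass NULL, which is all the remaining hunks below do. A small standalone sketch of those staleness checks; toy_state and hint_usable are illustrative names only:

#include <stdio.h>
#include <stddef.h>

/* Why the hint checks cached->tree: a state that has been unlinked
 * (tree == NULL) must not seed the walk. Not the kernel's types. */
struct toy_state {
	unsigned long long start;
	void *tree;		/* NULL once removed from the tree */
};

static int hint_usable(const struct toy_state *cached,
		       unsigned long long start)
{
	return cached && cached->tree && cached->start == start;
}

int main(void)
{
	int dummy_tree;
	struct toy_state s = { 4096, &dummy_tree };

	printf("linked hint:   %d\n", hint_usable(&s, 4096));	/* 1 */
	s.tree = NULL;		/* state merged or freed elsewhere */
	printf("unlinked hint: %d\n", hint_usable(&s, 4096));	/* 0 */
	printf("wrong offset:  %d\n", hint_usable(&s, 0));	/* 0 */
	return 0;
}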
@@ -1580,7 +1595,7 @@ static int check_page_uptodate(struct extent_io_tree *tree,
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
 	return 0;
 }
@@ -1594,7 +1609,7 @@ static int check_page_locked(struct extent_io_tree *tree,
 {
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 end = start + PAGE_CACHE_SIZE - 1;
-	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
 		unlock_page(page);
 	return 0;
 }
@@ -2032,7 +2047,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			continue;
 		}
 		/* the get_extent function already copied into the page */
-		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+		if (test_range_bit(tree, cur, cur_end,
+				   EXTENT_UPTODATE, 1, NULL)) {
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
@@ -2305,7 +2321,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		}
 		/* leave this out until we have a page_mkwrite call */
 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
-				   EXTENT_DIRTY, 0)) {
+				   EXTENT_DIRTY, 0, NULL)) {
 			cur = cur + iosize;
 			pg_offset += iosize;
 			continue;
@@ -2721,7 +2737,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
 		    !isnew && !PageUptodate(page) &&
 		    (block_off_end > to || block_off_start < from) &&
 		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1)) {
+				    EXTENT_UPTODATE, 1, NULL)) {
 			u64 sector;
 			u64 extent_offset = block_start - em->start;
 			size_t iosize;
@@ -2776,7 +2792,7 @@ int try_release_extent_state(struct extent_map_tree *map,
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
-			   EXTENT_IOBITS | EXTENT_ORDERED, 0))
+			   EXTENT_IOBITS | EXTENT_ORDERED, 0, NULL))
 		ret = 0;
 	else {
 		if ((mask & GFP_NOFS) == GFP_NOFS)
@@ -2821,7 +2837,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK |
 					    EXTENT_ORDERED,
-					    0)) {
+					    0, NULL)) {
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);
@@ -3237,7 +3253,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 	int uptodate;
 	unsigned long index;
 
-	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
+	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL);
 	if (ret)
 		return 1;
 	while (start <= end) {
@@ -3267,7 +3283,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1);
+			   EXTENT_UPTODATE, 1, NULL);
 	if (ret)
 		return ret;
 
@@ -3303,7 +3319,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		return 0;
 
 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			   EXTENT_UPTODATE, 1)) {
+			   EXTENT_UPTODATE, 1, NULL)) {
 		return 0;
 	}
 