@@ -421,66 +421,6 @@ static int __check_block_validity(struct inode *inode, const char *func,
 #define check_block_validity(inode, map) \
         __check_block_validity((inode), __func__, __LINE__, (map))
 
-/*
- * Return the number of contiguous dirty pages in a given inode
- * starting at page frame idx.
- */
-static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
-                                    unsigned int max_pages)
-{
-        struct address_space *mapping = inode->i_mapping;
-        pgoff_t index;
-        struct pagevec pvec;
-        pgoff_t num = 0;
-        int i, nr_pages, done = 0;
-
-        if (max_pages == 0)
-                return 0;
-        pagevec_init(&pvec, 0);
-        while (!done) {
-                index = idx;
-                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                                              PAGECACHE_TAG_DIRTY,
-                                              (pgoff_t)PAGEVEC_SIZE);
-                if (nr_pages == 0)
-                        break;
-                for (i = 0; i < nr_pages; i++) {
-                        struct page *page = pvec.pages[i];
-                        struct buffer_head *bh, *head;
-
-                        lock_page(page);
-                        if (unlikely(page->mapping != mapping) ||
-                            !PageDirty(page) ||
-                            PageWriteback(page) ||
-                            page->index != idx) {
-                                done = 1;
-                                unlock_page(page);
-                                break;
-                        }
-                        if (page_has_buffers(page)) {
-                                bh = head = page_buffers(page);
-                                do {
-                                        if (!buffer_delay(bh) &&
-                                            !buffer_unwritten(bh))
-                                                done = 1;
-                                        bh = bh->b_this_page;
-                                } while (!done && (bh != head));
-                        }
-                        unlock_page(page);
-                        if (done)
-                                break;
-                        idx++;
-                        num++;
-                        if (num >= max_pages) {
-                                done = 1;
-                                break;
-                        }
-                }
-                pagevec_release(&pvec);
-        }
-        return num;
-}
-
 #ifdef ES_AGGRESSIVE_TEST
 static void ext4_map_blocks_es_recheck(handle_t *handle,
                                        struct inode *inode,
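ext4_num_dirty_pages() can go away here because its caller, the nr_to_writebump logic in ext4_da_writepages() removed further down in this patch, goes away too. The helper was built on the page cache's tagged lookup: fetch batches of PAGECACHE_TAG_DIRTY pages into a pagevec and stop at the first gap in the index. The sketch below is a minimal illustration of that pattern against the same era's pagevec API (pagevec_init/pagevec_lookup_tag/pagevec_release); it deliberately drops the page locking, writeback check, and buffer_delay/buffer_unwritten walk of the original, so it is not a drop-in replacement and does not come from this patch.

/*
 * Count a run of contiguous dirty pages starting at idx, using the
 * same tagged pagevec lookup the removed helper used.  Locking and
 * buffer_head state checks are intentionally omitted for brevity.
 */
#include <linux/pagevec.h>
#include <linux/pagemap.h>

static pgoff_t count_dirty_run(struct address_space *mapping, pgoff_t idx)
{
        struct pagevec pvec;
        pgoff_t num = 0;
        unsigned i, nr_pages;

        pagevec_init(&pvec, 0);
        for (;;) {
                pgoff_t index = idx;

                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              (pgoff_t)PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        /* a gap in the page index ends the contiguous run */
                        if (pvec.pages[i]->index != idx) {
                                pagevec_release(&pvec);
                                return num;
                        }
                        idx++;
                        num++;
                }
                pagevec_release(&pvec);
        }
        return num;
}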
@@ -2462,10 +2402,8 @@ static int ext4_da_writepages(struct address_space *mapping,
         struct mpage_da_data mpd;
         struct inode *inode = mapping->host;
         int pages_written = 0;
-        unsigned int max_pages;
         int range_cyclic, cycled = 1, io_done = 0;
         int needed_blocks, ret = 0;
-        long desired_nr_to_write, nr_to_writebump = 0;
         loff_t range_start = wbc->range_start;
         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
         pgoff_t done_index = 0;
@@ -2512,39 +2450,6 @@ static int ext4_da_writepages(struct address_space *mapping,
                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
         }
 
-        /*
-         * This works around two forms of stupidity.  The first is in
-         * the writeback code, which caps the maximum number of pages
-         * written to be 1024 pages.  This is wrong on multiple
-         * levels; different architectues have a different page size,
-         * which changes the maximum amount of data which gets
-         * written.  Secondly, 4 megabytes is way too small.  XFS
-         * forces this value to be 16 megabytes by multiplying
-         * nr_to_write parameter by four, and then relies on its
-         * allocator to allocate larger extents to make them
-         * contiguous.  Unfortunately this brings us to the second
-         * stupidity, which is that ext4's mballoc code only allocates
-         * at most 2048 blocks.  So we force contiguous writes up to
-         * the number of dirty blocks in the inode, or
-         * sbi->max_writeback_mb_bump whichever is smaller.
-         */
-        max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
-        if (!range_cyclic && range_whole) {
-                if (wbc->nr_to_write == LONG_MAX)
-                        desired_nr_to_write = wbc->nr_to_write;
-                else
-                        desired_nr_to_write = wbc->nr_to_write * 8;
-        } else
-                desired_nr_to_write = ext4_num_dirty_pages(inode, index,
-                                                           max_pages);
-        if (desired_nr_to_write > max_pages)
-                desired_nr_to_write = max_pages;
-
-        if (wbc->nr_to_write < desired_nr_to_write) {
-                nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
-                wbc->nr_to_write = desired_nr_to_write;
-        }
-
 retry:
         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                 tag_pages_for_writeback(mapping, index, end);
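The block removed above is the nr_to_writebump hack described in the deleted comment: temporarily inflate wbc->nr_to_write so delayed allocation has a chance to build large contiguous extents, then subtract the bump back in out_writepages (last hunk) so the caller's accounting is unaffected. Below is a rough, user-space worked example of the arithmetic only, assuming 4 KiB pages, the old 1024-page writeback cap mentioned in the comment, and a max_writeback_mb_bump default of 128 MB (an assumption here, not taken from this patch).

/* Standalone illustration of the removed arithmetic -- not kernel code. */
#include <stdio.h>
#include <limits.h>

int main(void)
{
        const long page_shift = 12;          /* assume 4 KiB pages */
        const long writeback_mb_bump = 128;  /* assumed old ext4 default, in MB */
        long nr_to_write = 1024;             /* the old writeback cap from the comment */
        long desired, bump = 0;

        /* same shift as: s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT) */
        long max_pages = writeback_mb_bump << (20 - page_shift);

        /* range_whole, non-cyclic case: bump the request eightfold */
        desired = (nr_to_write == LONG_MAX) ? nr_to_write : nr_to_write * 8;
        if (desired > max_pages)
                desired = max_pages;

        if (nr_to_write < desired) {
                bump = desired - nr_to_write;
                nr_to_write = desired;
        }

        /* 1024 pages (4 MB) becomes 8192 pages (32 MB); bump = 7168 */
        printf("max_pages=%ld desired=%ld nr_to_write=%ld bump=%ld\n",
               max_pages, desired, nr_to_write, bump);
        return 0;
}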
@@ -2637,7 +2542,6 @@ static int ext4_da_writepages(struct address_space *mapping,
                 mapping->writeback_index = done_index;
 
 out_writepages:
-        wbc->nr_to_write -= nr_to_writebump;
         wbc->range_start = range_start;
         trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
         return ret;