@@ -26,6 +26,7 @@
 #define __reverse_ffz(x) __reverse_ffs(~(x))
 
 static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *bio_entry_slab;
 static struct kmem_cache *sit_entry_set_slab;
 static struct kmem_cache *inmem_entry_slab;
 
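The functions added below use struct bio_entry, whose definition is not part of this hunk (in the original patch it lands in segment.h). A sketch inferred purely from how the fields are used in this file:

struct bio_entry {
	struct list_head list;		/* linked into SM_I(sbi)->wait_list */
	struct bio *bio;		/* the in-flight discard bio */
	struct completion event;	/* completed from the bio's endio callback */
	int error;			/* bi_error saved for the waiter */
};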
@@ -580,6 +581,74 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 	mutex_unlock(&dirty_i->seglist_lock);
 }
 
+static struct bio_entry *__add_bio_entry(struct f2fs_sb_info *sbi,
+							struct bio *bio)
+{
+	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+	struct bio_entry *be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
+
+	INIT_LIST_HEAD(&be->list);
+	be->bio = bio;
+	init_completion(&be->event);
+	list_add_tail(&be->list, wait_list);
+
+	return be;
+}
+
+void f2fs_wait_all_discard_bio(struct f2fs_sb_info *sbi)
+{
+	struct list_head *wait_list = &(SM_I(sbi)->wait_list);
+	struct bio_entry *be, *tmp;
+
+	list_for_each_entry_safe(be, tmp, wait_list, list) {
+		struct bio *bio = be->bio;
+		int err;
+
+		wait_for_completion_io(&be->event);
+		err = be->error;
+		if (err == -EOPNOTSUPP)
+			err = 0;
+
+		if (err)
+			f2fs_msg(sbi->sb, KERN_INFO,
+				"Issue discard failed, ret: %d", err);
+
+		bio_put(bio);
+		list_del(&be->list);
+		kmem_cache_free(bio_entry_slab, be);
+	}
+}
+
+static void f2fs_submit_bio_wait_endio(struct bio *bio)
+{
+	struct bio_entry *be = (struct bio_entry *)bio->bi_private;
+
+	be->error = bio->bi_error;
+	complete(&be->event);
+}
+
+/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
+int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	struct bio *bio = NULL;
+	int err;
+
+	err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
+			&bio);
+	if (!err && bio) {
+		struct bio_entry *be = __add_bio_entry(sbi, bio);
+
+		bio->bi_private = be;
+		bio->bi_end_io = f2fs_submit_bio_wait_endio;
+		bio->bi_opf |= REQ_SYNC;
+		submit_bio(bio);
+	}
+
+	return err;
+}
+
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 				block_t blkstart, block_t blklen)
 {
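Taken together, the functions above split discard into an asynchronous submit path and a synchronous drain. A hedged sketch of the intended caller pattern (the wrapper name example_issue_and_drain is illustrative only; the real call sites are f2fs_issue_discard() below and the checkpoint path, which is outside this file):

static void example_issue_and_drain(struct f2fs_sb_info *sbi,
					sector_t sector, sector_t nr_sects)
{
	/* returns as soon as the bio is submitted; it is parked on wait_list */
	__f2fs_issue_discard_async(sbi, sector, nr_sects, GFP_NOFS, 0);

	/* ... issue further discards, do other work ... */

	/* block until every parked discard bio has signalled its completion */
	f2fs_wait_all_discard_bio(sbi);
}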
@@ -597,7 +666,7 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 		sbi->discard_blks--;
 	}
 	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
-	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
+	return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
 }
 
 bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
@@ -719,11 +788,14 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	struct list_head *head = &(SM_I(sbi)->discard_list);
 	struct discard_entry *entry, *this;
 	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+	struct blk_plug plug;
 	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
 	unsigned int start = 0, end = -1;
 	unsigned int secno, start_segno;
 	bool force = (cpc->reason == CP_DISCARD);
 
+	blk_start_plug(&plug);
+
 	mutex_lock(&dirty_i->seglist_lock);
 
 	while (1) {
@@ -772,6 +844,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		SM_I(sbi)->nr_discards -= entry->len;
 		kmem_cache_free(discard_entry_slab, entry);
 	}
+
+	blk_finish_plug(&plug);
 }
 
 static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
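blk_start_plug()/blk_finish_plug() is the standard block-layer plugging API: bios submitted between the two calls are held on a per-task list and dispatched as a batch when the plug is finished (or when the task sleeps), which gives the block layer a chance to merge the many small discard bios issued inside the loop above. The general shape, independent of f2fs:

struct blk_plug plug;

blk_start_plug(&plug);
/* submit many bios; they queue on the plug rather than dispatching one by one */
blk_finish_plug(&plug);		/* flush the whole batch to the device queue */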
@@ -2457,6 +2531,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
 
 	INIT_LIST_HEAD(&sm_info->discard_list);
+	INIT_LIST_HEAD(&sm_info->wait_list);
 	sm_info->nr_discards = 0;
 	sm_info->max_discards = 0;
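The wait_list head initialized here is the one __add_bio_entry() and f2fs_wait_all_discard_bio() reach through SM_I(sbi). The field itself is added to struct f2fs_sm_info (declared in f2fs.h in this era), which this diff does not show; as a sketch it sits alongside the existing discard_list:

struct f2fs_sm_info {
	/* ... */
	struct list_head discard_list;	/* small discard candidates */
	struct list_head wait_list;	/* issued discard bios awaiting completion */
	/* ... */
};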
@@ -2600,10 +2675,15 @@ int __init create_segment_manager_caches(void)
 	if (!discard_entry_slab)
 		goto fail;
 
+	bio_entry_slab = f2fs_kmem_cache_create("bio_entry",
+			sizeof(struct bio_entry));
+	if (!bio_entry_slab)
+		goto destory_discard_entry;
+
 	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
 			sizeof(struct sit_entry_set));
 	if (!sit_entry_set_slab)
-		goto destory_discard_entry;
+		goto destroy_bio_entry;
 
 	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
 			sizeof(struct inmem_pages));
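f2fs_kmem_cache_create() is f2fs's thin wrapper around kmem_cache_create(); assuming the f2fs.h definition of this era, it is roughly:

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
							size_t size)
{
	/* reclaimable slab, no constructor, no extra debug flags */
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}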
@@ -2613,6 +2693,8 @@ int __init create_segment_manager_caches(void)
 
 destroy_sit_entry_set:
 	kmem_cache_destroy(sit_entry_set_slab);
+destroy_bio_entry:
+	kmem_cache_destroy(bio_entry_slab);
 destory_discard_entry:
 	kmem_cache_destroy(discard_entry_slab);
 fail:
@@ -2622,6 +2704,7 @@ int __init create_segment_manager_caches(void)
 void destroy_segment_manager_caches(void)
 {
 	kmem_cache_destroy(sit_entry_set_slab);
+	kmem_cache_destroy(bio_entry_slab);
 	kmem_cache_destroy(discard_entry_slab);
 	kmem_cache_destroy(inmem_entry_slab);
 }