@@ -839,171 +839,161 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 	async_extent->pages = NULL;
 }
 
-/*
- * phase two of compressed writeback.  This is the ordered portion
- * of the code, which only gets called in the order the work was
- * queued.  We walk all the async extents created by compress_file_range
- * and send them down to the disk.
- */
-static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
+static int submit_one_async_extent(struct btrfs_inode *inode,
+				   struct async_chunk *async_chunk,
+				   struct async_extent *async_extent,
+				   u64 *alloc_hint)
 {
-	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
-	struct btrfs_fs_info *fs_info = inode->root->fs_info;
-	struct async_extent *async_extent;
-	u64 alloc_hint = 0;
+	struct extent_io_tree *io_tree = &inode->io_tree;
+	struct btrfs_root *root = inode->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key ins;
 	struct extent_map *em;
-	struct btrfs_root *root = inode->root;
-	struct extent_io_tree *io_tree = &inode->io_tree;
 	int ret = 0;
+	u64 start = async_extent->start;
+	u64 end = async_extent->start + async_extent->ram_size - 1;
 
-again:
-	while (!list_empty(&async_chunk->extents)) {
-		async_extent = list_entry(async_chunk->extents.next,
-					  struct async_extent, list);
-		list_del(&async_extent->list);
-
-retry:
-		lock_extent(io_tree, async_extent->start,
-			    async_extent->start + async_extent->ram_size - 1);
-		/* did the compression code fall back to uncompressed IO? */
-		if (!async_extent->pages) {
-			int page_started = 0;
-			unsigned long nr_written = 0;
-
-			/* allocate blocks */
-			ret = cow_file_range(inode, async_chunk->locked_page,
-					     async_extent->start,
-					     async_extent->start +
-					     async_extent->ram_size - 1,
-					     &page_started, &nr_written, 0);
-
-			/* JDM XXX */
-
-			/*
-			 * if page_started, cow_file_range inserted an
-			 * inline extent and took care of all the unlocking
-			 * and IO for us.  Otherwise, we need to submit
-			 * all those pages down to the drive.
-			 */
-			if (!page_started && !ret)
-				extent_write_locked_range(&inode->vfs_inode,
-						  async_extent->start,
-						  async_extent->start +
-						  async_extent->ram_size - 1,
-						  WB_SYNC_ALL);
-			else if (ret && async_chunk->locked_page)
-				unlock_page(async_chunk->locked_page);
-			kfree(async_extent);
-			cond_resched();
-			continue;
-		}
+	lock_extent(io_tree, start, end);
 
-		ret = btrfs_reserve_extent(root, async_extent->ram_size,
-					   async_extent->compressed_size,
-					   async_extent->compressed_size,
-					   0, alloc_hint, &ins, 1, 1);
-		if (ret) {
-			free_async_extent_pages(async_extent);
+	/* We have fallen back to uncompressed write */
+	if (!async_extent->pages) {
+		int page_started = 0;
+		unsigned long nr_written = 0;
 
-			if (ret == -ENOSPC) {
-				unlock_extent(io_tree, async_extent->start,
-					      async_extent->start +
-					      async_extent->ram_size - 1);
-
-				/*
-				 * we need to redirty the pages if we decide to
-				 * fallback to uncompressed IO, otherwise we
-				 * will not submit these pages down to lower
-				 * layers.
-				 */
-				extent_range_redirty_for_io(&inode->vfs_inode,
-						async_extent->start,
-						async_extent->start +
-						async_extent->ram_size - 1);
-
-				goto retry;
-			}
-			goto out_free;
-		}
 		/*
-		 * here we're doing allocation and writeback of the
-		 * compressed pages
+		 * Call cow_file_range() to run the delalloc range directly,
+		 * since we won't go to nocow or async path again.
 		 */
-		em = create_io_em(inode, async_extent->start,
-				  async_extent->ram_size, /* len */
-				  async_extent->start, /* orig_start */
-				  ins.objectid, /* block_start */
-				  ins.offset, /* block_len */
-				  ins.offset, /* orig_block_len */
-				  async_extent->ram_size, /* ram_bytes */
-				  async_extent->compress_type,
-				  BTRFS_ORDERED_COMPRESSED);
-		if (IS_ERR(em))
-			/* ret value is not necessary due to void function */
-			goto out_free_reserve;
-		free_extent_map(em);
-
-		ret = btrfs_add_ordered_extent_compress(inode,
-						async_extent->start,
-						ins.objectid,
-						async_extent->ram_size,
-						ins.offset,
-						async_extent->compress_type);
-		if (ret) {
-			btrfs_drop_extent_cache(inode, async_extent->start,
-						async_extent->start +
-						async_extent->ram_size - 1, 0);
-			goto out_free_reserve;
-		}
-		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
-
+		ret = cow_file_range(inode, async_chunk->locked_page,
+				     start, end, &page_started, &nr_written, 0);
 		/*
-		 * clear dirty, set writeback and unlock the pages.
+		 * If @page_started, cow_file_range() inserted an inline extent
+		 * and took care of all the unlocking and IO for us.
+		 * Otherwise, we need to submit all those pages down to the
+		 * drive.
 		 */
-		extent_clear_unlock_delalloc(inode, async_extent->start,
-					     async_extent->start +
-					     async_extent->ram_size - 1,
-					     NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
-					     PAGE_UNLOCK | PAGE_START_WRITEBACK);
-		if (btrfs_submit_compressed_write(inode, async_extent->start,
-				    async_extent->ram_size,
-				    ins.objectid,
-				    ins.offset, async_extent->pages,
-				    async_extent->nr_pages,
-				    async_chunk->write_flags,
-				    async_chunk->blkcg_css)) {
-			const u64 start = async_extent->start;
-			const u64 end = start + async_extent->ram_size - 1;
-
-			btrfs_writepage_endio_finish_ordered(inode, NULL, start,
-							     end, false);
-
-			extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
-						     PAGE_END_WRITEBACK |
-						     PAGE_SET_ERROR);
-			free_async_extent_pages(async_extent);
-		}
-		alloc_hint = ins.objectid + ins.offset;
+		if (!page_started && !ret)
+			extent_write_locked_range(&inode->vfs_inode, start,
+						  end, WB_SYNC_ALL);
+		else if (ret && async_chunk->locked_page)
+			unlock_page(async_chunk->locked_page);
 		kfree(async_extent);
-		cond_resched();
+		return ret;
 	}
-	return;
+
+	ret = btrfs_reserve_extent(root, async_extent->ram_size,
+				   async_extent->compressed_size,
+				   async_extent->compressed_size,
+				   0, *alloc_hint, &ins, 1, 1);
+	if (ret) {
+		free_async_extent_pages(async_extent);
+		/*
+		 * Here we used to try again by going back to the
+		 * non-compressed path for ENOSPC.  But if we can't reserve
+		 * space even for the compressed size, how could it work for
+		 * the uncompressed size, which requires even more space?  So
+		 * here we go directly to the error path.
+		 */
+		goto out_free;
+	}
+
+	/* Here we're doing allocation and writeback of the compressed pages */
+	em = create_io_em(inode, start,
+			  async_extent->ram_size,	/* len */
+			  start,			/* orig_start */
+			  ins.objectid,			/* block_start */
+			  ins.offset,			/* block_len */
+			  ins.offset,			/* orig_block_len */
+			  async_extent->ram_size,	/* ram_bytes */
+			  async_extent->compress_type,
+			  BTRFS_ORDERED_COMPRESSED);
+	if (IS_ERR(em)) {
+		ret = PTR_ERR(em);
+		goto out_free_reserve;
+	}
+	free_extent_map(em);
+
+	ret = btrfs_add_ordered_extent_compress(inode, start,	/* file_offset */
+					ins.objectid,		/* disk_bytenr */
+					async_extent->ram_size,	/* num_bytes */
+					ins.offset,		/* disk_num_bytes */
+					async_extent->compress_type);
+	if (ret) {
+		btrfs_drop_extent_cache(inode, start, end, 0);
+		goto out_free_reserve;
+	}
+	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+
+	/* Clear dirty, set writeback and unlock the pages. */
+	extent_clear_unlock_delalloc(inode, start, end,
+			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
+			PAGE_UNLOCK | PAGE_START_WRITEBACK);
+	if (btrfs_submit_compressed_write(inode, start,	/* file_offset */
+			    async_extent->ram_size,	/* num_bytes */
+			    ins.objectid,		/* disk_bytenr */
+			    ins.offset,			/* compressed_len */
+			    async_extent->pages,	/* compressed_pages */
+			    async_extent->nr_pages,
+			    async_chunk->write_flags,
+			    async_chunk->blkcg_css)) {
+		const u64 start = async_extent->start;
+		const u64 end = start + async_extent->ram_size - 1;
+
+		btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);
+
+		extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+					     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
+		free_async_extent_pages(async_extent);
+	}
+	*alloc_hint = ins.objectid + ins.offset;
+	kfree(async_extent);
+	return ret;
+
 out_free_reserve:
 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 out_free:
-	extent_clear_unlock_delalloc(inode, async_extent->start,
-				     async_extent->start +
-				     async_extent->ram_size - 1,
+	extent_clear_unlock_delalloc(inode, start, end,
 				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
 				     EXTENT_DELALLOC_NEW |
 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
 				     PAGE_END_WRITEBACK | PAGE_SET_ERROR);
 	free_async_extent_pages(async_extent);
 	kfree(async_extent);
-	goto again;
+	return ret;
+}
+
+/*
+ * Phase two of compressed writeback.  This is the ordered portion of the code,
+ * which only gets called in the order the work was queued.  We walk all the
+ * async extents created by compress_file_range and send them down to the disk.
+ */
+static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
+{
+	struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct async_extent *async_extent;
+	u64 alloc_hint = 0;
+	int ret = 0;
+
+	while (!list_empty(&async_chunk->extents)) {
+		u64 extent_start;
+		u64 ram_size;
+
+		async_extent = list_entry(async_chunk->extents.next,
+					  struct async_extent, list);
+		list_del(&async_extent->list);
+		extent_start = async_extent->start;
+		ram_size = async_extent->ram_size;
+
+		ret = submit_one_async_extent(inode, async_chunk, async_extent,
+					      &alloc_hint);
+		btrfs_debug(fs_info,
+"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
+			    inode->root->root_key.objectid,
+			    btrfs_ino(inode), extent_start, ram_size, ret);
+	}
 }
 
 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
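
The refactor above turns the body of the old while loop into submit_one_async_extent(), which reports failure through its return value and threads the allocation hint back to the caller through the u64 *alloc_hint pointer, so each extent's allocation search starts where the previous extent ended. Below is a minimal, standalone sketch of that hint-threading pattern in plain C; the names (submit_one, hint) are hypothetical illustrations, not btrfs code.

#include <stdio.h>

typedef unsigned long long u64;

/*
 * Hypothetical stand-in for submit_one_async_extent(): consumes the hint
 * for placement and advances it for the next caller via the pointer.
 */
static int submit_one(u64 len, u64 *hint)
{
	u64 placed_at = *hint;	/* pretend we allocated near the hint */

	printf("placed %llu bytes at %llu\n", len, placed_at);
	*hint = placed_at + len;	/* next search starts past this extent */
	return 0;
}

int main(void)
{
	u64 hint = 0;
	u64 extents[] = { 4096, 8192, 4096 };

	/* Like submit_compressed_extents(): one hint threaded through the loop */
	for (int i = 0; i < 3; i++)
		if (submit_one(extents[i], &hint))
			break;
	return 0;
}

Keeping the hint in the caller rather than in the helper preserves the old loop-local alloc_hint behavior: consecutive compressed extents still tend to land next to each other on disk.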
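submit_one_async_extent() also keeps the kernel's usual goto-unwind error handling: a failure after the extent reservation jumps to out_free_reserve, which releases the reservation and then falls through to out_free to clean up the locked range and free the async extent. Here is a small sketch of that fall-through unwind convention, using hypothetical malloc-based resources rather than the btrfs APIs.

#include <errno.h>
#include <stdlib.h>

/* Acquire two resources in order; on failure, undo in reverse order. */
static int demo(int fail_late)
{
	char *range_state, *reservation;
	int ret = 0;

	range_state = malloc(32);	/* step 1, like locking the range */
	if (!range_state)
		return -ENOMEM;

	reservation = malloc(32);	/* step 2, like btrfs_reserve_extent() */
	if (!reservation) {
		ret = -ENOSPC;
		goto out_free;		/* nothing reserved yet, skip that label */
	}

	if (fail_late) {		/* a later step failing, like create_io_em() */
		ret = -EIO;
		goto out_free_reserve;	/* must drop the reservation too */
	}

	free(reservation);
	free(range_state);
	return 0;

out_free_reserve:
	free(reservation);		/* undo step 2 first */
out_free:
	free(range_state);		/* then undo step 1 */
	return ret;
}

int main(void)
{
	demo(0);	/* success path */
	demo(1);	/* late failure exercises both labels */
	return 0;
}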