@@ -170,15 +170,28 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
 	return blk_status_to_errno(ret);
 }
 
-static void flush_write_bio(struct extent_page_data *epd)
+/*
+ * Submit bio from extent page data via submit_one_bio
+ *
+ * Return 0 if everything is OK.
+ * Return <0 for error.
+ */
+static int __must_check flush_write_bio(struct extent_page_data *epd)
 {
-	if (epd->bio) {
-		int ret;
+	int ret = 0;
 
+	if (epd->bio) {
 		ret = submit_one_bio(epd->bio, 0, 0);
-		BUG_ON(ret < 0); /* -ENOMEM */
+		/*
+		 * Clean up of epd->bio is handled by its endio function.
+		 * And endio is either triggered by successful bio execution
+		 * or the error handler of submit bio hook.
+		 * So at this point, no matter what happened, we don't need
+		 * to clean up epd->bio.
+		 */
 		epd->bio = NULL;
 	}
+	return ret;
 }
 
 int __init extent_io_init(void)
@@ -3476,7 +3489,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 
 	if (!btrfs_try_tree_write_lock(eb)) {
 		flush = 1;
-		flush_write_bio(epd);
+		ret = flush_write_bio(epd);
+		BUG_ON(ret < 0);
 		btrfs_tree_lock(eb);
 	}
 
@@ -3485,7 +3499,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 	if (!epd->sync_io)
 		return 0;
 	if (!flush) {
-		flush_write_bio(epd);
+		ret = flush_write_bio(epd);
+		BUG_ON(ret < 0);
 		flush = 1;
 	}
 	while (1) {
@@ -3526,7 +3541,8 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 
 		if (!trylock_page(p)) {
 			if (!flush) {
-				flush_write_bio(epd);
+				ret = flush_write_bio(epd);
+				BUG_ON(ret < 0);
 				flush = 1;
 			}
 			lock_page(p);
@@ -3718,6 +3734,7 @@ int btree_write_cache_pages(struct address_space *mapping,
 		.sync_io = wbc->sync_mode == WB_SYNC_ALL,
 	};
 	int ret = 0;
+	int flush_ret;
 	int done = 0;
 	int nr_to_write_done = 0;
 	struct pagevec pvec;
@@ -3817,7 +3834,8 @@ int btree_write_cache_pages(struct address_space *mapping,
 		index = 0;
 		goto retry;
 	}
-	flush_write_bio(&epd);
+	flush_ret = flush_write_bio(&epd);
+	BUG_ON(flush_ret < 0);
 	return ret;
 }
 
@@ -3914,7 +3932,8 @@ static int extent_write_cache_pages(struct address_space *mapping,
			 * tmpfs file mapping
			 */
			if (!trylock_page(page)) {
-				flush_write_bio(epd);
+				ret = flush_write_bio(epd);
+				BUG_ON(ret < 0);
				lock_page(page);
			}
 
@@ -3924,8 +3943,10 @@ static int extent_write_cache_pages(struct address_space *mapping,
			}
 
			if (wbc->sync_mode != WB_SYNC_NONE) {
-				if (PageWriteback(page))
-					flush_write_bio(epd);
+				if (PageWriteback(page)) {
+					ret = flush_write_bio(epd);
+					BUG_ON(ret < 0);
+				}
				wait_on_page_writeback(page);
			}
 
@@ -3986,6 +4007,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
 int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 {
 	int ret;
+	int flush_ret;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = &BTRFS_I(page->mapping->host)->io_tree,
@@ -3995,14 +4017,16 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
 
 	ret = __extent_writepage(page, wbc, &epd);
 
-	flush_write_bio(&epd);
+	flush_ret = flush_write_bio(&epd);
+	BUG_ON(flush_ret < 0);
 	return ret;
 }
 
 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
			      int mode)
 {
 	int ret = 0;
+	int flush_ret;
 	struct address_space *mapping = inode->i_mapping;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct page *page;
@@ -4035,14 +4059,16 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 		start += PAGE_SIZE;
 	}
 
-	flush_write_bio(&epd);
+	flush_ret = flush_write_bio(&epd);
+	BUG_ON(flush_ret < 0);
 	return ret;
 }
 
 int extent_writepages(struct address_space *mapping,
		      struct writeback_control *wbc)
 {
 	int ret = 0;
+	int flush_ret;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = &BTRFS_I(mapping->host)->io_tree,
@@ -4051,7 +4077,8 @@ int extent_writepages(struct address_space *mapping,
 	};
 
 	ret = extent_write_cache_pages(mapping, wbc, &epd);
-	flush_write_bio(&epd);
+	flush_ret = flush_write_bio(&epd);
+	BUG_ON(flush_ret < 0);
 	return ret;
 }
 
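The shape of the change: flush_write_bio() goes from void to returning the status of submit_one_bio(), is annotated __must_check so no caller can silently drop the result, and every call site picks up the return value, for now asserting with BUG_ON() until real error handling lands. Below is a minimal standalone sketch of that pattern, outside the btrfs tree; the names (pending_io, submit_pending, flush_pending) are hypothetical stand-ins, not kernel APIs, and it builds as plain userspace C with GCC or Clang.

#include <stdio.h>

/* GCC/Clang attribute behind the kernel's __must_check macro. */
#define __must_check __attribute__((warn_unused_result))

struct pending_io {
	int queued;	/* stand-in for epd->bio being non-NULL */
};

/* Pretend submission; returns 0 on success, negative errno on failure. */
static int submit_pending(struct pending_io *io)
{
	io->queued = 0;	/* ownership passes to the (pretend) endio path */
	return 0;
}

/* Return 0 if everything is OK, <0 for error, like the new flush_write_bio(). */
static int __must_check flush_pending(struct pending_io *io)
{
	int ret = 0;

	if (io->queued)
		ret = submit_pending(io);
	return ret;
}

int main(void)
{
	struct pending_io io = { .queued = 1 };
	int ret = flush_pending(&io);	/* ignoring ret triggers a compiler warning */

	printf("flush returned %d\n", ret);
	return ret < 0 ? 1 : 0;
}

Because of the warn_unused_result attribute, calling flush_pending(&io) without consuming the return value warns at compile time, which is exactly what __must_check buys the patch: every caller of flush_write_bio() is forced to decide what to do with the error, even if the interim decision is BUG_ON().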