@@ -41,11 +41,23 @@ struct wb_writeback_work {
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
 	struct completion *done;	/* set if the caller waits */
 };
 
+const char *wb_reason_name[] = {
+	[WB_REASON_BACKGROUND]		= "background",
+	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
+	[WB_REASON_SYNC]		= "sync",
+	[WB_REASON_PERIODIC]		= "periodic",
+	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
+	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
+	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
+	[WB_REASON_FORKER_THREAD]	= "forker_thread"
+};
+
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
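Note: the wb_reason type itself is not part of this hunk; per this series it is introduced in include/linux/writeback.h. A minimal sketch of the shape the code above assumes, reconstructed from the name table (WB_REASON_MAX is an assumption):

/*
 * Sketch only -- the real definition lives in include/linux/writeback.h.
 * The enum values double as indices into wb_reason_name[] above.
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

Because wb_reason_name[] uses designated initializers ([WB_REASON_SYNC] = "sync", ...), the string table stays correct even if the enum is reordered, as long as every value keeps an entry.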
@@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic)
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
+	work->reason	= reason;
 
 	bdi_queue_work(bdi, work);
 }
@@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  *   completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			enum wb_reason reason)
 {
-	__bdi_start_writeback(bdi, nr_pages, true);
+	__bdi_start_writeback(bdi, nr_pages, true, reason);
 }
 
 /**
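With the widened signature, every caller of bdi_start_writeback() now has to state why it wants writeback. An illustrative, hypothetical call site, modeled on the laptop-mode timer (one of the callers this series converts; the real site is outside this diff):

/* Hypothetical caller: kick the flusher thread for this bdi and tag
 * the work item so trace output shows what triggered it. */
static void example_laptop_timer_fired(struct backing_dev_info *bdi,
				       long nr_pages)
{
	bdi_start_writeback(bdi, nr_pages, WB_REASON_LAPTOP_TIMER);
}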
@@ -251,7 +265,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
-			       unsigned long *older_than_this)
+			       struct wb_writeback_work *work)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
@@ -262,8 +276,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (older_than_this &&
-		    inode_dirtied_after(inode, *older_than_this))
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
@@ -302,13 +316,13 @@ static int move_expired_inodes(struct list_head *delaying_queue,
  *    |
  *    +--> dequeue for IO
  */
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
 	int moved;
 	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
-	trace_writeback_queue_io(wb, older_than_this, moved);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+	trace_writeback_queue_io(wb, work, moved);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
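queue_io() and move_expired_inodes() now take the whole work item rather than just older_than_this, so trace_writeback_queue_io() can report both the expiry cutoff and the reason from a single argument. The real event is a TRACE_EVENT in include/trace/events/writeback.h; the sketch below only illustrates what the trace point can now pull out of *work:

/* Sketch only: the information available to the trace point after
 * this change. Not the real TRACE_EVENT definition. */
static void example_report_queue_io(struct bdi_writeback *wb,
				    struct wb_writeback_work *work,
				    int moved)
{
	unsigned long older = work->older_than_this ?
				*work->older_than_this : 0;

	pr_debug("queue_io: bdi %s: older=%lu reason=%s enqueue=%d\n",
		 dev_name(wb->bdi->dev), older,
		 wb_reason_name[work->reason], moved);
}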
@@ -641,31 +655,40 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 	return wrote;
 }
 
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+				enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
 		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
+		.reason		= reason,
 	};
 
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
-		queue_io(wb, NULL);
+		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);
 
 	return nr_pages - work.nr_pages;
 }
 
-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
 {
 	unsigned long background_thresh, dirty_thresh;
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 
-	return (global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+				bdi_dirty_limit(bdi, background_thresh))
+		return true;
+
+	return false;
 }
 
 /*
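The background-threshold check is no longer purely global: a device can now be over its own background limit even when the system-wide dirty total is fine, since bdi_dirty_limit() scales the global threshold by the bdi's recent share of writeout. A rough worked example, with invented numbers:

/*
 * Invented numbers, for illustration only:
 *
 *   global background_thresh          = 10000 pages
 *   this bdi's share of writeout      ~ 20%
 *     => bdi_dirty_limit(bdi, 10000)  ~ 2000 pages
 *
 *   global dirty + unstable = 6000  -> first test false (6000 <= 10000)
 *   bdi reclaimable         = 3000  -> second test true (3000 > 2000)
 *
 * over_bground_thresh() returns true, so background writeback runs
 * for this bdi even though the global threshold is not exceeded.
 */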
@@ -675,7 +698,7 @@ static inline bool over_bground_thresh(void)
 static void wb_update_bandwidth(struct bdi_writeback *wb,
 				unsigned long start_time)
 {
-	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
 }
 
 /*
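The extra 0 tracks a widened __bdi_update_bandwidth() prototype from elsewhere in this series; judging by the call, the new argument slots in ahead of start_time and is presumably a background-threshold value. Assumed shape (mm/page-writeback.c; an assumption, not shown in this diff):

/* Assumed prototype; only bg_thresh is new, which is what the
 * extra 0 in the call above corresponds to. */
void __bdi_update_bandwidth(struct backing_dev_info *bdi,
			    unsigned long thresh,
			    unsigned long bg_thresh,
			    unsigned long dirty,
			    unsigned long bdi_thresh,
			    unsigned long bdi_dirty,
			    unsigned long start_time);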
@@ -727,7 +750,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;
 
 		if (work->for_kupdate) {
@@ -738,7 +761,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
-			queue_io(wb, work->older_than_this);
+			queue_io(wb, work);
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -811,13 +834,14 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh()) {
+	if (over_bground_thresh(wb->bdi)) {
 
 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
 			.sync_mode	= WB_SYNC_NONE,
 			.for_background	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_BACKGROUND,
 		};
 
 		return wb_writeback(wb, &work);
@@ -851,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 		.sync_mode	= WB_SYNC_NONE,
 		.for_kupdate	= 1,
 		.range_cyclic	= 1,
+		.reason		= WB_REASON_PERIODIC,
 	};
 
 	return wb_writeback(wb, &work);
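The periodic (kupdate-style) flush tags its work WB_REASON_PERIODIC here; the matching older_than_this cutoff is filled in later, inside wb_writeback()'s for_kupdate branch, which is exactly why queue_io() now wants the whole work item rather than a bare timestamp. A sketch paraphrased from that branch of the surrounding kernel code:

/* Paraphrased from wb_writeback(): only inodes dirtied before the
 * cutoff get queued. dirty_expire_interval is in centiseconds,
 * hence the * 10 to reach milliseconds. */
static void example_set_kupdate_cutoff(struct wb_writeback_work *work,
				       unsigned long *oldest_jif)
{
	*oldest_jif = jiffies -
			msecs_to_jiffies(dirty_expire_interval * 10);
	work->older_than_this = oldest_jif;
}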
@@ -969,7 +994,7 @@ int bdi_writeback_thread(void *data)
  * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
  * the whole world.
  */
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
 
@@ -982,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false);
+		__bdi_start_writeback(bdi, nr_pages, false, reason);
 	}
 	rcu_read_unlock();
 }
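Callers of wakeup_flusher_threads() are converted in the same series, so memory reclaim can now identify itself instead of being indistinguishable from every other trigger. A hypothetical reclaim-side call (the real site is in mm/vmscan.c):

/* Hypothetical helper: ask every bdi's flusher for nr_pages of
 * writeback, tagged as reclaim-initiated for tracing. */
static void example_reclaim_kick_flushers(long nr_pages)
{
	wakeup_flusher_threads(nr_pages, WB_REASON_TRY_TO_FREE_PAGES);
}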
@@ -1203,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb)
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
-void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1212,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 		.tagged_writepages	= 1,
 		.done			= &done,
 		.nr_pages		= nr,
+		.reason			= reason,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1228,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
+	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1241,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1262,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
 * Returns 1 if writeback was started, 0 if not.
 */
 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-				   unsigned long nr)
+				   unsigned long nr,
+				   enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb_nr(sb, nr);
+		writeback_inodes_sb_nr(sb, nr, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1290,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb)
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
 		.done		= &done,
+		.reason		= WB_REASON_SYNC,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
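The sb-level helpers get the same treatment, so filesystem callers must pass a reason as well; a filesystem writing back to recover free space would presumably use WB_REASON_FS_FREE_SPACE. A hypothetical sketch:

/* Hypothetical fs-side use: start writeback for this superblock if
 * none is in progress, tagged as a free-space flush. */
static void example_fs_reclaim_space(struct super_block *sb)
{
	if (writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE))
		pr_debug("%s: flushing to free space\n", sb->s_id);
}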