Skip to content

Commit 6aeb991

Browse files
Jim Zhao authored and akpm00 committed
mm/page-writeback: consolidate wb_thresh bumping logic into __wb_calc_thresh
Address the feedback from 39ac998 ("mm/page-writeback: raise wb_thresh to prevent write blocking with strictlimit)". The wb_thresh bumping logic is scattered across wb_position_ratio, __wb_calc_thresh, and wb_update_dirty_ratelimit. For consistency, consolidate all wb_thresh bumping logic into __wb_calc_thresh. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Jim Zhao <[email protected]> Reviewed-by: Jan Kara <[email protected]> Cc: Matthew Wilcox <[email protected]> Cc: Kemeng Shi <[email protected]> Cc: Guenter Roeck <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 686fa95 commit 6aeb991

File tree

1 file changed

+16
-37
lines changed

1 file changed

+16
-37
lines changed

mm/page-writeback.c

Lines changed: 16 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -942,33 +942,33 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
942942
wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio);
943943

944944
wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
945-
wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
946-
if (wb_thresh > wb_max_thresh)
947-
wb_thresh = wb_max_thresh;
948945

949946
/*
950-
* With strictlimit flag, the wb_thresh is treated as
951-
* a hard limit in balance_dirty_pages() and wb_position_ratio().
952-
* It's possible that wb_thresh is close to zero, not because
953-
* the device is slow, but because it has been inactive.
954-
* To prevent occasional writes from being blocked, we raise wb_thresh.
947+
* It's very possible that wb_thresh is close to 0 not because the
948+
* device is slow, but that it has remained inactive for long time.
949+
* Honour such devices a reasonable good (hopefully IO efficient)
950+
* threshold, so that the occasional writes won't be blocked and active
951+
* writes can rampup the threshold quickly.
955952
*/
956-
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
957-
unsigned long limit = hard_dirty_limit(dom, dtc->thresh);
958-
u64 wb_scale_thresh = 0;
959-
960-
if (limit > dtc->dirty)
961-
wb_scale_thresh = (limit - dtc->dirty) / 100;
962-
wb_thresh = max(wb_thresh, min(wb_scale_thresh, wb_max_thresh / 4));
953+
if (thresh > dtc->dirty) {
954+
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT))
955+
wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100);
956+
else
957+
wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8);
963958
}
964959

960+
wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
961+
if (wb_thresh > wb_max_thresh)
962+
wb_thresh = wb_max_thresh;
963+
965964
return wb_thresh;
966965
}
967966

968967
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
969968
{
970969
struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
971970

971+
domain_dirty_avail(&gdtc, true);
972972
return __wb_calc_thresh(&gdtc, thresh);
973973
}
974974

@@ -1145,12 +1145,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
11451145
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
11461146
long long wb_pos_ratio;
11471147

1148-
if (dtc->wb_dirty < 8) {
1149-
dtc->pos_ratio = min_t(long long, pos_ratio * 2,
1150-
2 << RATELIMIT_CALC_SHIFT);
1151-
return;
1152-
}
1153-
11541148
if (dtc->wb_dirty >= wb_thresh)
11551149
return;
11561150

@@ -1221,14 +1215,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc)
12211215
*/
12221216
if (unlikely(wb_thresh > dtc->thresh))
12231217
wb_thresh = dtc->thresh;
1224-
/*
1225-
* It's very possible that wb_thresh is close to 0 not because the
1226-
* device is slow, but that it has remained inactive for long time.
1227-
* Honour such devices a reasonable good (hopefully IO efficient)
1228-
* threshold, so that the occasional writes won't be blocked and active
1229-
* writes can rampup the threshold quickly.
1230-
*/
1231-
wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
12321218
/*
12331219
* scale global setpoint to wb's:
12341220
* wb_setpoint = setpoint * wb_thresh / thresh
@@ -1484,17 +1470,10 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
14841470
* balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
14851471
* Hence, to calculate "step" properly, we have to use wb_dirty as
14861472
* "dirty" and wb_setpoint as "setpoint".
1487-
*
1488-
* We rampup dirty_ratelimit forcibly if wb_dirty is low because
1489-
* it's possible that wb_thresh is close to zero due to inactivity
1490-
* of backing device.
14911473
*/
14921474
if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
14931475
dirty = dtc->wb_dirty;
1494-
if (dtc->wb_dirty < 8)
1495-
setpoint = dtc->wb_dirty + 1;
1496-
else
1497-
setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1476+
setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
14981477
}
14991478

15001479
if (dirty < setpoint) {

0 commit comments

Comments
 (0)