Skip to content

Commit 0ce6579

Browse files
committed
dm: impose configurable deadline for dm_request_fn's merge heuristic
Otherwise, for sequential workloads, the dm_request_fn can allow excessive request merging at the expense of increased service time. Add a per-device sysfs attribute to allow the user to control how long a request, that is a reasonable merge candidate, can be queued on the request queue. The resolution of this request dispatch deadline is in microseconds (ranging from 1 to 100000 usecs), to set a 20us deadline: echo 20 > /sys/block/dm-7/dm/rq_based_seq_io_merge_deadline The dm_request_fn's merge heuristic and associated extra accounting is disabled by default (rq_based_seq_io_merge_deadline is 0). This sysfs attribute is not applicable to bio-based DM devices so it will only ever report 0 for them. By allowing a request to remain on the queue it will block other requests on the queue. But introducing a short dequeue delay has proven very effective at enabling certain sequential IO workloads on really fast, yet IOPS constrained, devices to build up slightly larger IOs -- yielding 90+% throughput improvements. Having precise control over the time taken to wait for larger requests to build affords control beyond that of waiting for certain IO sizes to accumulate (which would require a deadline anyway). This knob will only ever make sense with sequential IO workloads and the particular value used is storage configuration specific. Given the expected niche use-case for when this knob is useful it has been deemed acceptable to expose this relatively crude method for crafting optimal IO on specific storage -- especially given the solution is simple yet effective. In the context of DM multipath, it is advisable to tune this sysfs attribute to a value that offers the best performance for the common case (e.g. if 4 paths are expected active, tune for that; if paths fail then performance may be slightly reduced). Alternatives were explored to have request-based DM autotune this value (e.g.
if/when paths fail) but they were quickly deemed too fragile and complex to warrant further design and development time. If this problem proves more common as faster storage emerges we'll have to look at elevating a generic solution into the block core. Tested-by: Shiva Krishna Merla <[email protected]> Signed-off-by: Mike Snitzer <[email protected]>
1 parent b898320 commit 0ce6579

File tree

4 files changed

+73
-4
lines changed

4 files changed

+73
-4
lines changed

Documentation/ABI/testing/sysfs-block-dm

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,3 +23,17 @@ Description: Device-mapper device suspend state.
2323
Contains the value 1 while the device is suspended.
2424
Otherwise it contains 0. Read-only attribute.
2525
Users: util-linux, device-mapper udev rules
26+
27+
What: /sys/block/dm-<num>/dm/rq_based_seq_io_merge_deadline
28+
Date: March 2015
29+
KernelVersion: 4.1
30+
31+
Description: Allow control over how long a request that is a
32+
reasonable merge candidate can be queued on the request
33+
queue. The resolution of this deadline is in
34+
microseconds (ranging from 1 to 100000 usecs).
35+
Setting this attribute to 0 (the default) will disable
36+
request-based DM's merge heuristic and associated extra
37+
accounting. This attribute is not applicable to
38+
bio-based DM devices so it will only ever report 0 for
39+
them.

drivers/md/dm-sysfs.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,11 +92,13 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
9292
static DM_ATTR_RO(name);
9393
static DM_ATTR_RO(uuid);
9494
static DM_ATTR_RO(suspended);
95+
static DM_ATTR_RW(rq_based_seq_io_merge_deadline);
9596

9697
static struct attribute *dm_attrs[] = {
9798
&dm_attr_name.attr,
9899
&dm_attr_uuid.attr,
99100
&dm_attr_suspended.attr,
101+
&dm_attr_rq_based_seq_io_merge_deadline.attr,
100102
NULL,
101103
};
102104

drivers/md/dm.c

Lines changed: 53 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
#include <linux/delay.h>
2222
#include <linux/wait.h>
2323
#include <linux/kthread.h>
24+
#include <linux/ktime.h>
2425
#include <linux/elevator.h> /* for rq_end_sector() */
2526

2627
#include <trace/events/block.h>
@@ -219,8 +220,10 @@ struct mapped_device {
219220
struct task_struct *kworker_task;
220221

221222
/* for request-based merge heuristic in dm_request_fn() */
222-
sector_t last_rq_pos;
223+
unsigned seq_rq_merge_deadline_usecs;
223224
int last_rq_rw;
225+
sector_t last_rq_pos;
226+
ktime_t last_rq_start_time;
224227
};
225228

226229
/*
@@ -1935,8 +1938,11 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
19351938
blk_start_request(orig);
19361939
atomic_inc(&md->pending[rq_data_dir(orig)]);
19371940

1938-
md->last_rq_pos = rq_end_sector(orig);
1939-
md->last_rq_rw = rq_data_dir(orig);
1941+
if (md->seq_rq_merge_deadline_usecs) {
1942+
md->last_rq_pos = rq_end_sector(orig);
1943+
md->last_rq_rw = rq_data_dir(orig);
1944+
md->last_rq_start_time = ktime_get();
1945+
}
19401946

19411947
/*
19421948
* Hold the md reference here for the in-flight I/O.
@@ -1948,6 +1954,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
19481954
dm_get(md);
19491955
}
19501956

1957+
#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
1958+
1959+
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
1960+
{
1961+
return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
1962+
}
1963+
1964+
/*
 * Set the merge deadline (in microseconds) used by dm_request_fn()'s
 * merge heuristic.  0 (the default) disables the heuristic; values above
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS are clamped to that maximum.  Writes to
 * a bio-based device are accepted but ignored, since the heuristic only
 * applies to request-based DM.
 *
 * Returns @count on success, or a negative errno on parse failure.
 */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;
	int r;

	/* Attribute is meaningless for bio-based DM; silently accept. */
	if (!dm_request_based(md))
		return count;

	/* Propagate the parser's error (-EINVAL or -ERANGE) to userspace. */
	r = kstrtouint(buf, 10, &deadline);
	if (r)
		return r;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}
1982+
1983+
static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
1984+
{
1985+
ktime_t kt_deadline;
1986+
1987+
if (!md->seq_rq_merge_deadline_usecs)
1988+
return false;
1989+
1990+
kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
1991+
kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
1992+
1993+
return !ktime_after(ktime_get(), kt_deadline);
1994+
}
1995+
19511996
/*
19521997
* q->request_fn for request-based dm.
19531998
* Called with the queue lock held.
@@ -1990,7 +2035,8 @@ static void dm_request_fn(struct request_queue *q)
19902035
continue;
19912036
}
19922037

1993-
if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
2038+
if (dm_request_peeked_before_merge_deadline(md) &&
2039+
md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
19942040
md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
19952041
goto delay_and_out;
19962042

@@ -2532,6 +2578,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
25322578
if (!q)
25332579
return 0;
25342580

2581+
/* disable dm_request_fn's merge heuristic by default */
2582+
md->seq_rq_merge_deadline_usecs = 0;
2583+
25352584
md->queue = q;
25362585
dm_init_md_queue(md);
25372586
blk_queue_softirq_done(md->queue, dm_softirq_done);

drivers/md/dm.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -234,4 +234,8 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
234234
return !maxlen || strlen(result) + 1 >= maxlen;
235235
}
236236

237+
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
238+
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
239+
const char *buf, size_t count);
240+
237241
#endif

0 commit comments

Comments
 (0)