Skip to content

Commit 2705c93

Browse files
Ming Lei authored and axboe committed
block: kill QUEUE_FLAG_NO_SG_MERGE
Since bdced43 ("block: setup bi_phys_segments after splitting"), physical segment number is mainly figured out in blk_queue_split() for fast path, and the flag of BIO_SEG_VALID is set there too. Now only blk_recount_segments() and blk_recalc_rq_segments() use this flag. Basically blk_recount_segments() is bypassed in fast path given BIO_SEG_VALID is set in blk_queue_split(). For another user of blk_recalc_rq_segments(): - run in partial completion branch of blk_update_request, which is an unusual case - run in blk_cloned_rq_check_limits(), still not a big problem if the flag is killed since dm-rq is the only user. Multi-page bvec is enabled now, not doing S/G merging is rather pointless with the current setup of the I/O path, as it isn't going to save you a significant amount of cycles. Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Omar Sandoval <[email protected]> Signed-off-by: Ming Lei <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent ac4fa1d commit 2705c93

File tree

5 files changed

+6
-43
lines changed

5 files changed

+6
-43
lines changed

block/blk-merge.c

Lines changed: 6 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -358,8 +358,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
358358
EXPORT_SYMBOL(blk_queue_split);
359359

360360
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
361-
struct bio *bio,
362-
bool no_sg_merge)
361+
struct bio *bio)
363362
{
364363
struct bio_vec bv, bvprv = { NULL };
365364
int prev = 0;
@@ -385,13 +384,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
385384
nr_phys_segs = 0;
386385
for_each_bio(bio) {
387386
bio_for_each_bvec(bv, bio, iter) {
388-
/*
389-
* If SG merging is disabled, each bio vector is
390-
* a segment
391-
*/
392-
if (no_sg_merge)
393-
goto new_segment;
394-
395387
if (prev) {
396388
if (seg_size + bv.bv_len
397389
> queue_max_segment_size(q))
@@ -421,27 +413,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
421413

422414
void blk_recalc_rq_segments(struct request *rq)
423415
{
424-
bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
425-
&rq->q->queue_flags);
426-
427-
rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
428-
no_sg_merge);
416+
rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
429417
}
430418

431419
void blk_recount_segments(struct request_queue *q, struct bio *bio)
432420
{
433-
unsigned short seg_cnt = bio_segments(bio);
434-
435-
if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
436-
(seg_cnt < queue_max_segments(q)))
437-
bio->bi_phys_segments = seg_cnt;
438-
else {
439-
struct bio *nxt = bio->bi_next;
421+
struct bio *nxt = bio->bi_next;
440422

441-
bio->bi_next = NULL;
442-
bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
443-
bio->bi_next = nxt;
444-
}
423+
bio->bi_next = NULL;
424+
bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
425+
bio->bi_next = nxt;
445426

446427
bio_set_flag(bio, BIO_SEG_VALID);
447428
}

block/blk-mq-debugfs.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,6 @@ static const char *const blk_queue_flag_name[] = {
128128
QUEUE_FLAG_NAME(SAME_FORCE),
129129
QUEUE_FLAG_NAME(DEAD),
130130
QUEUE_FLAG_NAME(INIT_DONE),
131-
QUEUE_FLAG_NAME(NO_SG_MERGE),
132131
QUEUE_FLAG_NAME(POLL),
133132
QUEUE_FLAG_NAME(WC),
134133
QUEUE_FLAG_NAME(FUA),

block/blk-mq.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2837,9 +2837,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
28372837
set->map[HCTX_TYPE_POLL].nr_queues)
28382838
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
28392839

2840-
if (!(set->flags & BLK_MQ_F_SG_MERGE))
2841-
blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
2842-
28432840
q->sg_reserved_size = INT_MAX;
28442841

28452842
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);

drivers/md/dm-table.c

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1698,14 +1698,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
16981698
return q && !blk_queue_add_random(q);
16991699
}
17001700

1701-
static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
1702-
sector_t start, sector_t len, void *data)
1703-
{
1704-
struct request_queue *q = bdev_get_queue(dev->bdev);
1705-
1706-
return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
1707-
}
1708-
17091701
static bool dm_table_all_devices_attribute(struct dm_table *t,
17101702
iterate_devices_callout_fn func)
17111703
{
@@ -1902,11 +1894,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
19021894
if (!dm_table_supports_write_zeroes(t))
19031895
q->limits.max_write_zeroes_sectors = 0;
19041896

1905-
if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
1906-
blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
1907-
else
1908-
blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
1909-
19101897
dm_table_verify_integrity(t);
19111898

19121899
/*

include/linux/blkdev.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -588,7 +588,6 @@ struct request_queue {
588588
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
589589
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
590590
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
591-
#define QUEUE_FLAG_NO_SG_MERGE 15 /* don't attempt to merge SG segments*/
592591
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
593592
#define QUEUE_FLAG_WC 17 /* Write back caching */
594593
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */

0 commit comments

Comments (0)