Skip to content

Commit 1eff9d3

Browse files
committed
block: rename bio bi_rw to bi_opf
Since commit 63a4cc2, bio->bi_rw contains flags in the lower portion and the op code in the higher portions. This means that old code that relies on manually setting bi_rw is most likely going to be broken. Instead of letting that brokenness linger, rename the member, to force old and out-of-tree code to break at compile time instead of at runtime. No intended functional changes in this commit. Signed-off-by: Jens Axboe <[email protected]>
1 parent 31c64f7 commit 1eff9d3

Some content is hidden

Large commits have some content hidden by default. Use the search box below to locate content that may be hidden.

51 files changed

+158
-157
lines changed

Documentation/block/biodoc.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
269269
requests which haven't aged too much on the queue. Potentially this priority
270270
could even be exposed to applications in some manner, providing higher level
271271
tunability. Time based aging avoids starvation of lower priority
272-
requests. Some bits in the bi_rw flags field in the bio structure are
272+
requests. Some bits in the bi_opf flags field in the bio structure are
273273
intended to be used for this priority information.
274274

275275

@@ -432,7 +432,7 @@ struct bio {
432432
struct bio *bi_next; /* request queue link */
433433
struct block_device *bi_bdev; /* target device */
434434
unsigned long bi_flags; /* status, command, etc */
435-
unsigned long bi_rw; /* low bits: r/w, high: priority */
435+
unsigned long bi_opf; /* low bits: r/w, high: priority */
436436

437437
unsigned int bi_vcnt; /* how may bio_vec's */
438438
struct bvec_iter bi_iter; /* current index into bio_vec array */

Documentation/device-mapper/dm-flakey.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ Optional feature parameters:
4242
<direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
4343
'w' is incompatible with drop_writes.
4444
<value>: The value (from 0-255) to write.
45-
<flags>: Perform the replacement only if bio->bi_rw has all the
45+
<flags>: Perform the replacement only if bio->bi_opf has all the
4646
selected flags set.
4747

4848
Examples:

block/bio-integrity.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
8686

8787
bip->bip_bio = bio;
8888
bio->bi_integrity = bip;
89-
bio->bi_rw |= REQ_INTEGRITY;
89+
bio->bi_opf |= REQ_INTEGRITY;
9090

9191
return bip;
9292
err:

block/bio.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
580580
*/
581581
bio->bi_bdev = bio_src->bi_bdev;
582582
bio_set_flag(bio, BIO_CLONED);
583-
bio->bi_rw = bio_src->bi_rw;
583+
bio->bi_opf = bio_src->bi_opf;
584584
bio->bi_iter = bio_src->bi_iter;
585585
bio->bi_io_vec = bio_src->bi_io_vec;
586586

@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
663663
if (!bio)
664664
return NULL;
665665
bio->bi_bdev = bio_src->bi_bdev;
666-
bio->bi_rw = bio_src->bi_rw;
666+
bio->bi_opf = bio_src->bi_opf;
667667
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
668668
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
669669

@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
873873
init_completion(&ret.event);
874874
bio->bi_private = &ret;
875875
bio->bi_end_io = submit_bio_wait_endio;
876-
bio->bi_rw |= REQ_SYNC;
876+
bio->bi_opf |= REQ_SYNC;
877877
submit_bio(bio);
878878
wait_for_completion_io(&ret.event);
879879

block/blk-core.c

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
10291029
* Flush requests do not use the elevator so skip initialization.
10301030
* This allows a request to share the flush and elevator data.
10311031
*/
1032-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
1032+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
10331033
return false;
10341034

10351035
return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
15041504
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
15051505
struct bio *bio)
15061506
{
1507-
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1507+
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
15081508

15091509
if (!ll_back_merge_fn(q, req, bio))
15101510
return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
15261526
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
15271527
struct bio *bio)
15281528
{
1529-
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1529+
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
15301530

15311531
if (!ll_front_merge_fn(q, req, bio))
15321532
return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
16481648
{
16491649
req->cmd_type = REQ_TYPE_FS;
16501650

1651-
req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1652-
if (bio->bi_rw & REQ_RAHEAD)
1651+
req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
1652+
if (bio->bi_opf & REQ_RAHEAD)
16531653
req->cmd_flags |= REQ_FAILFAST_MASK;
16541654

16551655
req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
16601660

16611661
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
16621662
{
1663-
const bool sync = !!(bio->bi_rw & REQ_SYNC);
1663+
const bool sync = !!(bio->bi_opf & REQ_SYNC);
16641664
struct blk_plug *plug;
16651665
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
16661666
struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
16811681
return BLK_QC_T_NONE;
16821682
}
16831683

1684-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
1684+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
16851685
spin_lock_irq(q->queue_lock);
16861686
where = ELEVATOR_INSERT_FLUSH;
16871687
goto get_rq;
@@ -1728,7 +1728,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
17281728
/*
17291729
* Add in META/PRIO flags, if set, before we get to the IO scheduler
17301730
*/
1731-
rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
1731+
rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
17321732

17331733
/*
17341734
* Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
18051805
printk(KERN_INFO "attempt to access beyond end of device\n");
18061806
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
18071807
bdevname(bio->bi_bdev, b),
1808-
bio->bi_rw,
1808+
bio->bi_opf,
18091809
(unsigned long long)bio_end_sector(bio),
18101810
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
18111811
}
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
19181918
* drivers without flush support don't have to worry
19191919
* about them.
19201920
*/
1921-
if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
1921+
if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
19221922
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1923-
bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
1923+
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
19241924
if (!nr_sectors) {
19251925
err = 0;
19261926
goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
22192219
* one.
22202220
*/
22212221
for (bio = rq->bio; bio; bio = bio->bi_next) {
2222-
if ((bio->bi_rw & ff) != ff)
2222+
if ((bio->bi_opf & ff) != ff)
22232223
break;
22242224
bytes += bio->bi_iter.bi_size;
22252225
}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
26302630
/* mixed attributes always follow the first bio */
26312631
if (req->cmd_flags & REQ_MIXED_MERGE) {
26322632
req->cmd_flags &= ~REQ_FAILFAST_MASK;
2633-
req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2633+
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
26342634
}
26352635

26362636
/*

block/blk-merge.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
186186

187187
if (split) {
188188
/* there isn't chance to merge the splitted bio */
189-
split->bi_rw |= REQ_NOMERGE;
189+
split->bi_opf |= REQ_NOMERGE;
190190

191191
bio_chain(split, *bio);
192192
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
616616
* Distributes the attributs to each bio.
617617
*/
618618
for (bio = rq->bio; bio; bio = bio->bi_next) {
619-
WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
620-
(bio->bi_rw & REQ_FAILFAST_MASK) != ff);
621-
bio->bi_rw |= ff;
619+
WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
620+
(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
621+
bio->bi_opf |= ff;
622622
}
623623
rq->cmd_flags |= REQ_MIXED_MERGE;
624624
}

block/blk-mq.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
12341234
ctx = blk_mq_get_ctx(q);
12351235
hctx = q->mq_ops->map_queue(q, ctx->cpu);
12361236

1237-
if (rw_is_sync(bio_op(bio), bio->bi_rw))
1237+
if (rw_is_sync(bio_op(bio), bio->bi_opf))
12381238
op_flags |= REQ_SYNC;
12391239

12401240
trace_block_getrq(q, bio, op);
@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
13021302
*/
13031303
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
13041304
{
1305-
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
1306-
const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
1305+
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1306+
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
13071307
struct blk_map_ctx data;
13081308
struct request *rq;
13091309
unsigned int request_count = 0;
@@ -1396,8 +1396,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
13961396
*/
13971397
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
13981398
{
1399-
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
1400-
const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
1399+
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1400+
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
14011401
struct blk_plug *plug;
14021402
unsigned int request_count = 0;
14031403
struct blk_map_ctx data;

block/blk-throttle.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
821821
* second time when it eventually gets issued. Set it when a bio
822822
* is being charged to a tg.
823823
*/
824-
if (!(bio->bi_rw & REQ_THROTTLED))
825-
bio->bi_rw |= REQ_THROTTLED;
824+
if (!(bio->bi_opf & REQ_THROTTLED))
825+
bio->bi_opf |= REQ_THROTTLED;
826826
}
827827

828828
/**
@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
13991399
WARN_ON_ONCE(!rcu_read_lock_held());
14001400

14011401
/* see throtl_charge_bio() */
1402-
if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
1402+
if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
14031403
goto out;
14041404

14051405
spin_lock_irq(q->queue_lock);
@@ -1478,7 +1478,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
14781478
* being issued.
14791479
*/
14801480
if (!throttled)
1481-
bio->bi_rw &= ~REQ_THROTTLED;
1481+
bio->bi_opf &= ~REQ_THROTTLED;
14821482
return throttled;
14831483
}
14841484

block/cfq-iosched.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
918918
*/
919919
static inline bool cfq_bio_sync(struct bio *bio)
920920
{
921-
return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
921+
return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
922922
}
923923

924924
/*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
25652565
static void cfq_bio_merged(struct request_queue *q, struct request *req,
25662566
struct bio *bio)
25672567
{
2568-
cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
2568+
cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
25692569
}
25702570

25712571
static void

drivers/block/drbd/drbd_main.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
16631663
struct bio *bio)
16641664
{
16651665
if (connection->agreed_pro_version >= 95)
1666-
return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1667-
(bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
1668-
(bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
1666+
return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1667+
(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1668+
(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
16691669
(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
16701670
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
16711671
else
1672-
return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1672+
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
16731673
}
16741674

16751675
/* Used to send write or TRIM aka REQ_DISCARD requests

drivers/block/drbd/drbd_receiver.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
15641564
* drbd_submit_peer_request()
15651565
* @device: DRBD device.
15661566
* @peer_req: peer request
1567-
* @rw: flag field, see bio->bi_rw
1567+
* @rw: flag field, see bio->bi_opf
15681568
*
15691569
* May spread the pages to multiple bios,
15701570
* depending on bio_add_page restrictions.

drivers/block/drbd/drbd_req.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
288288
*/
289289
if (!ok &&
290290
bio_op(req->master_bio) == REQ_OP_READ &&
291-
!(req->master_bio->bi_rw & REQ_RAHEAD) &&
291+
!(req->master_bio->bi_opf & REQ_RAHEAD) &&
292292
!list_empty(&req->tl_requests))
293293
req->rq_state |= RQ_POSTPONED;
294294

@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
11371137
* replicating, in which case there is no point. */
11381138
if (unlikely(req->i.size == 0)) {
11391139
/* The only size==0 bios we expect are empty flushes. */
1140-
D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
1140+
D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
11411141
if (remote)
11421142
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
11431143
return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
11761176

11771177
if (bio_op(bio) != REQ_OP_READ)
11781178
type = DRBD_FAULT_DT_WR;
1179-
else if (bio->bi_rw & REQ_RAHEAD)
1179+
else if (bio->bi_opf & REQ_RAHEAD)
11801180
type = DRBD_FAULT_DT_RA;
11811181
else
11821182
type = DRBD_FAULT_DT_RD;

drivers/block/drbd/drbd_worker.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
256256
what = DISCARD_COMPLETED_WITH_ERROR;
257257
break;
258258
case REQ_OP_READ:
259-
if (bio->bi_rw & REQ_RAHEAD)
259+
if (bio->bi_opf & REQ_RAHEAD)
260260
what = READ_AHEAD_COMPLETED_WITH_ERROR;
261261
else
262262
what = READ_COMPLETED_WITH_ERROR;

drivers/block/pktcdvd.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
11571157

11581158
bio_reset(pkt->bio);
11591159
pkt->bio->bi_bdev = pd->bdev;
1160-
pkt->bio->bi_rw = REQ_WRITE;
1160+
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
11611161
pkt->bio->bi_iter.bi_sector = new_sector;
11621162
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
11631163
pkt->bio->bi_vcnt = pkt->frames;

drivers/block/umem.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
535535
*card->biotail = bio;
536536
bio->bi_next = NULL;
537537
card->biotail = &bio->bi_next;
538-
if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
538+
if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
539539
activate(card);
540540
spin_unlock_irq(&card->lock);
541541

drivers/md/bcache/request.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
208208
* Journal writes are marked REQ_PREFLUSH; if the original write was a
209209
* flush, it'll wait on the journal write.
210210
*/
211-
bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
211+
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
212212

213213
do {
214214
unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
405405
if (!congested &&
406406
mode == CACHE_MODE_WRITEBACK &&
407407
op_is_write(bio_op(bio)) &&
408-
(bio->bi_rw & REQ_SYNC))
408+
(bio->bi_opf & REQ_SYNC))
409409
goto rescale;
410410

411411
spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
668668
s->iop.write_prio = 0;
669669
s->iop.error = 0;
670670
s->iop.flags = 0;
671-
s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
671+
s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
672672
s->iop.wq = bcache_wq;
673673

674674
return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
796796
goto out_submit;
797797
}
798798

799-
if (!(bio->bi_rw & REQ_RAHEAD) &&
800-
!(bio->bi_rw & REQ_META) &&
799+
if (!(bio->bi_opf & REQ_RAHEAD) &&
800+
!(bio->bi_opf & REQ_META) &&
801801
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
802802
reada = min_t(sector_t, dc->readahead >> 9,
803803
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
920920
bch_writeback_add(dc);
921921
s->iop.bio = bio;
922922

923-
if (bio->bi_rw & REQ_PREFLUSH) {
923+
if (bio->bi_opf & REQ_PREFLUSH) {
924924
/* Also need to send a flush to the backing device */
925925
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
926926
dc->disk.bio_split);

0 commit comments

Comments
 (0)