@@ -161,6 +161,73 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	return sectors;
 }
 
+static unsigned get_max_segment_size(struct request_queue *q,
+				     unsigned offset)
+{
+	unsigned long mask = queue_segment_boundary(q);
+
+	/* default segment boundary mask means no boundary limit */
+	if (mask == BLK_SEG_BOUNDARY_MASK)
+		return queue_max_segment_size(q);
+
+	return min_t(unsigned long, mask - (mask & offset) + 1,
+		     queue_max_segment_size(q));
+}
+
+/*
+ * Split the bvec @bv into segments, and update all kinds of
+ * variables.
+ */
+static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
+		unsigned *nsegs, unsigned *last_seg_size,
+		unsigned *front_seg_size, unsigned *sectors)
+{
+	unsigned len = bv->bv_len;
+	unsigned total_len = 0;
+	unsigned new_nsegs = 0, seg_size = 0;
+
+	/*
+	 * A multi-page bvec may be too big to hold in one segment, so the
+	 * current bvec has to be split into multiple segments.
+	 */
+	while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+		seg_size = min(seg_size, len);
+
+		new_nsegs++;
+		total_len += seg_size;
+		len -= seg_size;
+
+		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+			break;
+	}
+
+	if (!new_nsegs)
+		return !!len;
+
+	/* update front segment size */
+	if (!*nsegs) {
+		unsigned first_seg_size;
+
+		if (new_nsegs == 1)
+			first_seg_size = get_max_segment_size(q, bv->bv_offset);
+		else
+			first_seg_size = queue_max_segment_size(q);
+
+		if (*front_seg_size < first_seg_size)
+			*front_seg_size = first_seg_size;
+	}
+
+	/* update other variables */
+	*last_seg_size = seg_size;
+	*nsegs += new_nsegs;
+	if (sectors)
+		*sectors += total_len >> 9;
+
+	/* split in the middle of the bvec if len != 0 */
+	return !!len;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
 					 struct bio *bio,
 					 struct bio_set *bs,
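
A quick aside on the boundary math in get_max_segment_size(): for a power-of-two boundary, `mask - (mask & offset) + 1` is the number of bytes from `offset` up to and including the next boundary. The user-space sketch below reproduces that computation with made-up limits (a 64K boundary and a 64K max segment size); the names are invented for the example and are not the kernel's queue_* helpers.

#include <stdio.h>

#define SEG_BOUNDARY_MASK	0xffffUL	/* segments must not cross 64K */
#define MAX_SEGMENT_SIZE	65536UL

/* Same computation as get_max_segment_size() above, minus the queue. */
static unsigned long max_seg_bytes(unsigned long offset)
{
	unsigned long to_boundary =
		SEG_BOUNDARY_MASK - (SEG_BOUNDARY_MASK & offset) + 1;

	return to_boundary < MAX_SEGMENT_SIZE ? to_boundary : MAX_SEGMENT_SIZE;
}

int main(void)
{
	printf("%lu\n", max_seg_bytes(0xf000));	/* 4096: only 4K to the boundary */
	printf("%lu\n", max_seg_bytes(0));	/* 65536: a full segment fits */
	return 0;
}

In the kernel function the BLK_SEG_BOUNDARY_MASK check short-circuits this arithmetic entirely when the queue imposes no boundary limit.
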
@@ -174,7 +241,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
 
-	bio_for_each_segment(bv, bio, iter) {
+	bio_for_each_bvec(bv, bio, iter) {
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
@@ -189,8 +256,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			 */
 			if (nsegs < queue_max_segments(q) &&
 			    sectors < max_sectors) {
-				nsegs++;
-				sectors = max_sectors;
+				/* split in the middle of bvec */
+				bv.bv_len = (max_sectors - sectors) << 9;
+				bvec_split_segs(q, &bv, &nsegs,
+						&seg_size,
+						&front_seg_size,
+						&sectors);
 			}
 			goto split;
 		}
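
Note the unit conversion feeding the mid-bvec split: `(max_sectors - sectors) << 9` turns the remaining sector budget into bytes, since a sector is 512 = 2^9 bytes. With max_sectors = 256 and sectors = 192, for instance, the bvec is trimmed to 64 << 9 = 32768 bytes before bvec_split_segs() accounts for it, so the split lands exactly on the I/O size limit.
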
@@ -212,14 +283,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		if (nsegs == queue_max_segments(q))
 			goto split;
 
-		if (nsegs == 1 && seg_size > front_seg_size)
-			front_seg_size = seg_size;
-
-		nsegs++;
 		bvprv = bv;
 		bvprvp = &bvprv;
-		seg_size = bv.bv_len;
-		sectors += bv.bv_len >> 9;
+
+		if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
+				    &front_seg_size, &sectors))
+			goto split;
 
 	}
 
@@ -233,8 +302,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 		bio = new;
 	}
 
-	if (nsegs == 1 && seg_size > front_seg_size)
-		front_seg_size = seg_size;
 	bio->bi_seg_front_size = front_seg_size;
 	if (seg_size > bio->bi_seg_back_size)
 		bio->bi_seg_back_size = seg_size;
@@ -297,6 +364,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	struct bio_vec bv, bvprv = { NULL };
 	int prev = 0;
 	unsigned int seg_size, nr_phys_segs;
+	unsigned front_seg_size = bio->bi_seg_front_size;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
 
@@ -316,7 +384,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
-		bio_for_each_segment(bv, bio, iter) {
+		bio_for_each_bvec(bv, bio, iter) {
 			/*
 			 * If SG merging is disabled, each bio vector is
 			 * a segment
@@ -336,20 +404,15 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 				continue;
 			}
 new_segment:
-			if (nr_phys_segs == 1 && seg_size >
-			    fbio->bi_seg_front_size)
-				fbio->bi_seg_front_size = seg_size;
-
-			nr_phys_segs++;
 			bvprv = bv;
 			prev = 1;
-			seg_size = bv.bv_len;
+			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
+					&front_seg_size, NULL);
 		}
 		bbio = bio;
 	}
 
-	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
-		fbio->bi_seg_front_size = seg_size;
+	fbio->bi_seg_front_size = front_seg_size;
 	if (seg_size > bbio->bi_seg_back_size)
 		bbio->bi_seg_back_size = seg_size;
 
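
Taken together, the change switches both blk_bio_segment_split() and __blk_recalc_rq_segments() from bio_for_each_segment() to bio_for_each_bvec(), so each iteration may now see a multi-page bvec that has to be carved into several hardware segments. A standalone sketch of that carving loop, using invented limits rather than the real queue limits, might look like this:

#include <stdio.h>

/* Invented limits for illustration only. */
#define MAX_SEGMENTS		4
#define MAX_SEGMENT_SIZE	8192UL		/* 8K per segment */
#define SEG_BOUNDARY_MASK	0x3fffUL	/* must not cross 16K */

static unsigned long seg_limit(unsigned long offset)
{
	unsigned long to_boundary =
		SEG_BOUNDARY_MASK - (SEG_BOUNDARY_MASK & offset) + 1;

	return to_boundary < MAX_SEGMENT_SIZE ? to_boundary : MAX_SEGMENT_SIZE;
}

int main(void)
{
	unsigned long offset = 0, len = 20480;	/* one 20K multi-page "bvec" */
	unsigned nsegs = 0;

	/* Mirrors the while loop in bvec_split_segs() above. */
	while (len && nsegs < MAX_SEGMENTS) {
		unsigned long seg = seg_limit(offset);

		if (seg > len)
			seg = len;
		printf("segment %u: offset %lu, %lu bytes\n",
		       nsegs, offset, seg);
		offset += seg;
		len -= seg;
		nsegs++;
	}
	if (len)
		printf("%lu bytes left: the bio itself must be split\n", len);
	return 0;
}

Running this splits the 20K bvec into 8K + 8K + 4K segments; leftover `len`, like the `return !!len` in bvec_split_segs(), signals that the caller still has to split the bio.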