@@ -305,6 +305,85 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	return ret;
 }
 
+static void blkdev_bio_end_io_async(struct bio *bio)
+{
+	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
+	struct kiocb *iocb = dio->iocb;
+	ssize_t ret;
+
+	if (likely(!bio->bi_status)) {
+		ret = dio->size;
+		iocb->ki_pos += ret;
+	} else {
+		ret = blk_status_to_errno(bio->bi_status);
+	}
+
+	iocb->ki_complete(iocb, ret, 0);
+
+	if (dio->flags & DIO_SHOULD_DIRTY) {
+		bio_check_pages_dirty(bio);
+	} else {
+		bio_release_pages(bio, false);
+		bio_put(bio);
+	}
+}
+
+static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
+					struct iov_iter *iter,
+					unsigned int nr_pages)
+{
+	struct block_device *bdev = iocb->ki_filp->private_data;
+	struct blkdev_dio *dio;
+	struct bio *bio;
+	loff_t pos = iocb->ki_pos;
+	int ret = 0;
+
+	if ((pos | iov_iter_alignment(iter)) &
+	    (bdev_logical_block_size(bdev) - 1))
+		return -EINVAL;
+
+	bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
+	dio = container_of(bio, struct blkdev_dio, bio);
+	dio->flags = 0;
+	dio->iocb = iocb;
+	bio_set_dev(bio, bdev);
+	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
+	bio->bi_write_hint = iocb->ki_hint;
+	bio->bi_end_io = blkdev_bio_end_io_async;
+	bio->bi_ioprio = iocb->ki_ioprio;
+
+	ret = bio_iov_iter_get_pages(bio, iter);
+	if (unlikely(ret)) {
+		bio->bi_status = BLK_STS_IOERR;
+		bio_endio(bio);
+		return ret;
+	}
+	dio->size = bio->bi_iter.bi_size;
+
+	if (iov_iter_rw(iter) == READ) {
+		bio->bi_opf = REQ_OP_READ;
+		if (iter_is_iovec(iter)) {
+			dio->flags |= DIO_SHOULD_DIRTY;
+			bio_set_pages_dirty(bio);
+		}
+	} else {
+		bio->bi_opf = dio_bio_write_op(iocb);
+		task_io_account_write(bio->bi_iter.bi_size);
+	}
+
+	if (iocb->ki_flags & IOCB_NOWAIT)
+		bio->bi_opf |= REQ_NOWAIT;
+
+	if (iocb->ki_flags & IOCB_HIPRI) {
+		bio_set_polled(bio, iocb);
+		submit_bio(bio);
+		WRITE_ONCE(iocb->private, bio);
+	} else {
+		submit_bio(bio);
+	}
+	return -EIOCBQUEUED;
+}
+
 static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	unsigned int nr_pages;
@@ -313,9 +392,11 @@ static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		return 0;
 
 	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
-	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
-		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
-
+	if (likely(nr_pages <= BIO_MAX_VECS)) {
+		if (is_sync_kiocb(iocb))
+			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
+		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
+	}
 	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
 }
 
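Both new functions recover the containing struct blkdev_dio from the bio via container_of(). That works because bio_alloc_kiocb() allocates from blkdev_dio_pool, a bioset whose front pad reserves offsetof(struct blkdev_dio, bio) bytes ahead of every bio it hands out. A minimal sketch of the layout and pool setup this relies on (adapted from block/fops.c of the same era; treat the exact field ordering and flag set as assumptions to verify against the tree):

struct blkdev_dio {
	union {
		struct kiocb		*iocb;		/* async: iocb to complete */
		struct task_struct	*waiter;	/* sync: task to wake */
	};
	size_t			size;	/* bytes submitted; returned on success */
	atomic_t		ref;	/* used only by the multi-bio path */
	unsigned int		flags;	/* DIO_SHOULD_DIRTY, ... */
	struct bio		bio;	/* must stay last: front_pad sits before it */
};

/* Pool setup (sketch): the front pad is what makes container_of() legal. */
static int __init blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			   offsetof(struct blkdev_dio, bio),
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

Because this path is single-bio by construction (nr_pages <= BIO_MAX_VECS), dio->ref is never taken and blkdev_bio_end_io_async() can complete the iocb directly from bi_end_io context.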
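To hit the new branch from userspace, the kiocb must be asynchronous and the request small enough for one bio. A hedged liburing sketch (the device path and 4 KiB size are placeholders; it assumes a kernel carrying this patch, liburing installed, and read access to the device, typically root):

/* Build with: gcc -O2 odirect_uring.c -o odirect_uring -luring */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	/* Hypothetical device node; any readable block device works. */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT needs logical-block-size alignment; 4096 covers most
	 * devices, and one 4 KiB buffer is far below BIO_MAX_VECS pages. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* is_sync_kiocb() is false for io_uring submissions, so with this
	 * patch applied the read should route to __blkdev_direct_IO_async(). */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("read: %d\n", cqe->res);	/* bytes read or -errno */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	free(buf);
	close(fd);
	return 0;
}

Setting up the ring with IORING_SETUP_IOPOLL (on a device configured with poll queues) would mark the kiocb IOCB_HIPRI and exercise the bio_set_polled() branch instead.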