@@ -391,11 +391,19 @@ static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	/* extract bio before reusing the same field for request */
 	struct bio *bio = pdu->bio;
+	void *cookie = READ_ONCE(ioucmd->cookie);
 
 	pdu->req = req;
 	req->bio = bio;
-	/* this takes care of moving rest of completion-work to task context */
-	io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+	/*
+	 * For iopoll, complete it directly.
+	 * Otherwise, move the completion to task work.
+	 */
+	if (cookie != NULL && blk_rq_is_poll(req))
+		nvme_uring_task_cb(ioucmd);
+	else
+		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
 }
 
 static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -445,7 +453,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 		rq_flags = REQ_NOWAIT;
 		blk_flags = BLK_MQ_REQ_NOWAIT;
 	}
+	if (issue_flags & IO_URING_F_IOPOLL)
+		rq_flags |= REQ_POLLED;
 
+retry:
 	req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
 			d.data_len, nvme_to_user_ptr(d.metadata),
 			d.metadata_len, 0, &meta, d.timeout_ms ?
@@ -456,6 +467,17 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	req->end_io = nvme_uring_cmd_end_io;
 	req->end_io_data = ioucmd;
 
+	if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+		if (unlikely(!req->bio)) {
+			/* we can't poll this, so alloc regular req instead */
+			blk_mq_free_request(req);
+			rq_flags &= ~REQ_POLLED;
+			goto retry;
+		} else {
+			WRITE_ONCE(ioucmd->cookie, req->bio);
+			req->bio->bi_opf |= REQ_POLLED;
+		}
+	}
 	/* to free bio on completion, as req->bio will be null at that time */
 	pdu->bio = req->bio;
 	pdu->meta = meta;
@@ -559,9 +581,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
 {
-	/* IOPOLL not supported yet */
-	if (issue_flags & IO_URING_F_IOPOLL)
-		return -EOPNOTSUPP;
 
 	/* NVMe passthrough requires big SQE/CQE support */
 	if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +623,23 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 	return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
 }
 
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
+{
+	struct bio *bio;
+	int ret = 0;
+	struct nvme_ns *ns;
+	struct request_queue *q;
+
+	rcu_read_lock();
+	bio = READ_ONCE(ioucmd->cookie);
+	ns = container_of(file_inode(ioucmd->file)->i_cdev,
+			struct nvme_ns, cdev);
+	q = ns->queue;
+	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+		ret = bio_poll(bio, NULL, 0);
+	rcu_read_unlock();
+	return ret;
+}
 #ifdef CONFIG_NVME_MULTIPATH
 static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
 		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
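
Note: for the polled path to be reachable, the new helper presumably gets hooked into the namespace char-device file_operations elsewhere in this series (that hunk is not shown here). A minimal sketch of the wiring, assuming a uring_cmd_iopoll callback in struct file_operations:

	/* Sketch only; the actual registration is not part of this excerpt. */
	static const struct file_operations nvme_ns_chr_fops = {
		.owner		= THIS_MODULE,
		.open		= nvme_ns_chr_open,
		.release	= nvme_ns_chr_release,
		.unlocked_ioctl	= nvme_ns_chr_ioctl,
		.compat_ioctl	= compat_ptr_ioctl,
		.uring_cmd	= nvme_ns_chr_uring_cmd,
		.uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,	/* assumed hook */
	};
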
@@ -685,13 +721,40 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
 	srcu_read_unlock(&head->srcu, srcu_idx);
 	return ret;
 }
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd)
+{
+	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+	struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+	int srcu_idx = srcu_read_lock(&head->srcu);
+	struct nvme_ns *ns = nvme_find_path(head);
+	struct bio *bio;
+	int ret = 0;
+	struct request_queue *q;
+
+	if (ns) {
+		rcu_read_lock();
+		bio = READ_ONCE(ioucmd->cookie);
+		q = ns->queue;
+		if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+				&& bio->bi_bdev)
+			ret = bio_poll(bio, NULL, 0);
+		rcu_read_unlock();
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+	return ret;
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
 {
 	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
 	int ret;
 
+	/* IOPOLL not supported yet */
+	if (issue_flags & IO_URING_F_IOPOLL)
+		return -EOPNOTSUPP;
+
 	ret = nvme_uring_cmd_checks(issue_flags);
 	if (ret)
 		return ret;
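
For reference (not part of the commit), a minimal userspace sketch of how this path would be exercised: a ring created with IORING_SETUP_IOPOLL plus big SQEs/CQEs, issuing an NVMe Read passthrough against the generic char device. The device path, namespace ID, and 512-byte LBA size are assumptions, and error handling is trimmed.

	/* Sketch: polled NVMe passthrough via io_uring (needs CAP_SYS_ADMIN or
	 * suitable device permissions). */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <liburing.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		struct nvme_uring_cmd *cmd;
		void *buf;
		int fd, ret;

		/* Passthrough needs 128B SQEs / 32B CQEs; IOPOLL selects polled completions. */
		ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL |
					  IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
		if (ret < 0)
			return 1;

		fd = open("/dev/ng0n1", O_RDONLY);	/* assumed NVMe generic char device */
		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;

		sqe = io_uring_get_sqe(&ring);
		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = fd;
		sqe->cmd_op = NVME_URING_CMD_IO;

		/* The NVMe command itself lives in the big-SQE payload area. */
		cmd = (struct nvme_uring_cmd *)sqe->cmd;
		memset(cmd, 0, sizeof(*cmd));
		cmd->opcode = 0x02;			/* NVMe Read */
		cmd->nsid = 1;				/* assumed namespace */
		cmd->addr = (uintptr_t)buf;
		cmd->data_len = 4096;
		cmd->cdw12 = (4096 >> 9) - 1;		/* 0-based block count, assuming 512B LBAs */

		io_uring_submit(&ring);

		/* With IOPOLL, waiting on the CQE drives bio_poll() via the new hook. */
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			printf("nvme passthrough result: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return 0;
	}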