Skip to content

Commit cefa103

Browse files
author
Christoph Hellwig
committed
nvme-rdma: use the tagset alloc/free helpers
Use the common helpers to allocate and free the tagsets. To make this work the generic nvme_ctrl now needs to be stored in the hctx private data instead of the nvme_rdma_ctrl.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Sagi Grimberg <[email protected]>
Reviewed-by: Chaitanya Kulkarni <[email protected]>
1 parent 2d60738 commit cefa103

File tree

1 file changed

+34
-99
lines changed

1 file changed

+34
-99
lines changed

drivers/nvme/host/rdma.c

Lines changed: 34 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -788,64 +788,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
788788
return ret;
789789
}
790790

791-
static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
791+
static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
792792
{
793-
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
794-
struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
795-
int ret;
793+
unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
794+
NVME_RDMA_DATA_SGL_SIZE;
796795

797-
memset(set, 0, sizeof(*set));
798-
set->ops = &nvme_rdma_admin_mq_ops;
799-
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
800-
set->reserved_tags = NVMF_RESERVED_TAGS;
801-
set->numa_node = nctrl->numa_node;
802-
set->cmd_size = sizeof(struct nvme_rdma_request) +
803-
NVME_RDMA_DATA_SGL_SIZE;
804-
set->driver_data = &ctrl->ctrl;
805-
set->nr_hw_queues = 1;
806-
set->timeout = NVME_ADMIN_TIMEOUT;
807-
set->flags = BLK_MQ_F_NO_SCHED;
808-
ret = blk_mq_alloc_tag_set(set);
809-
if (!ret)
810-
ctrl->ctrl.admin_tagset = set;
811-
return ret;
812-
}
813-
814-
static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
815-
{
816-
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
817-
struct blk_mq_tag_set *set = &ctrl->tag_set;
818-
int ret;
796+
if (ctrl->max_integrity_segments)
797+
cmd_size += sizeof(struct nvme_rdma_sgl) +
798+
NVME_RDMA_METADATA_SGL_SIZE;
819799

820-
memset(set, 0, sizeof(*set));
821-
set->ops = &nvme_rdma_mq_ops;
822-
set->queue_depth = nctrl->sqsize + 1;
823-
set->reserved_tags = NVMF_RESERVED_TAGS;
824-
set->numa_node = nctrl->numa_node;
825-
set->flags = BLK_MQ_F_SHOULD_MERGE;
826-
set->cmd_size = sizeof(struct nvme_rdma_request) +
827-
NVME_RDMA_DATA_SGL_SIZE;
828-
if (nctrl->max_integrity_segments)
829-
set->cmd_size += sizeof(struct nvme_rdma_sgl) +
830-
NVME_RDMA_METADATA_SGL_SIZE;
831-
set->driver_data = &ctrl->ctrl;
832-
set->nr_hw_queues = nctrl->queue_count - 1;
833-
set->timeout = NVME_IO_TIMEOUT;
834-
set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
835-
ret = blk_mq_alloc_tag_set(set);
836-
if (!ret)
837-
ctrl->ctrl.tagset = set;
838-
return ret;
800+
return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
801+
&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
839802
}
840803

841-
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
842-
bool remove)
804+
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
843805
{
844-
if (remove) {
845-
blk_mq_destroy_queue(ctrl->ctrl.admin_q);
846-
blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
847-
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
848-
}
849806
if (ctrl->async_event_sqe.data) {
850807
cancel_work_sync(&ctrl->ctrl.async_event_work);
851808
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -887,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
887844
goto out_free_queue;
888845

889846
if (new) {
890-
error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
847+
error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
848+
&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
849+
BLK_MQ_F_NO_SCHED,
850+
sizeof(struct nvme_rdma_request) +
851+
NVME_RDMA_DATA_SGL_SIZE);
891852
if (error)
892853
goto out_free_async_qe;
893854

894-
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
895-
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
896-
error = PTR_ERR(ctrl->ctrl.fabrics_q);
897-
goto out_free_tagset;
898-
}
899-
900-
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
901-
if (IS_ERR(ctrl->ctrl.admin_q)) {
902-
error = PTR_ERR(ctrl->ctrl.admin_q);
903-
goto out_cleanup_fabrics_q;
904-
}
905855
}
906856

907857
error = nvme_rdma_start_queue(ctrl, 0);
908858
if (error)
909-
goto out_cleanup_queue;
859+
goto out_remove_admin_tag_set;
910860

911861
error = nvme_enable_ctrl(&ctrl->ctrl);
912862
if (error)
@@ -933,15 +883,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
933883
out_stop_queue:
934884
nvme_rdma_stop_queue(&ctrl->queues[0]);
935885
nvme_cancel_admin_tagset(&ctrl->ctrl);
936-
out_cleanup_queue:
886+
out_remove_admin_tag_set:
937887
if (new)
938-
blk_mq_destroy_queue(ctrl->ctrl.admin_q);
939-
out_cleanup_fabrics_q:
940-
if (new)
941-
blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
942-
out_free_tagset:
943-
if (new)
944-
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
888+
nvme_remove_admin_tag_set(&ctrl->ctrl);
945889
out_free_async_qe:
946890
if (ctrl->async_event_sqe.data) {
947891
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -953,16 +897,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
953897
return error;
954898
}
955899

956-
static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
957-
bool remove)
958-
{
959-
if (remove) {
960-
blk_mq_destroy_queue(ctrl->ctrl.connect_q);
961-
blk_mq_free_tag_set(ctrl->ctrl.tagset);
962-
}
963-
nvme_rdma_free_io_queues(ctrl);
964-
}
965-
966900
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
967901
{
968902
int ret, nr_queues;
@@ -975,10 +909,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
975909
ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
976910
if (ret)
977911
goto out_free_io_queues;
978-
979-
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
980-
if (ret)
981-
goto out_free_tag_set;
982912
}
983913

984914
/*
@@ -989,7 +919,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
989919
nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
990920
ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
991921
if (ret)
992-
goto out_cleanup_connect_q;
922+
goto out_cleanup_tagset;
993923

994924
if (!new) {
995925
nvme_start_queues(&ctrl->ctrl);
@@ -1022,13 +952,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
1022952
nvme_stop_queues(&ctrl->ctrl);
1023953
nvme_sync_io_queues(&ctrl->ctrl);
1024954
nvme_rdma_stop_io_queues(ctrl);
1025-
out_cleanup_connect_q:
955+
out_cleanup_tagset:
1026956
nvme_cancel_tagset(&ctrl->ctrl);
1027957
if (new)
1028-
blk_mq_destroy_queue(ctrl->ctrl.connect_q);
1029-
out_free_tag_set:
1030-
if (new)
1031-
blk_mq_free_tag_set(ctrl->ctrl.tagset);
958+
nvme_remove_io_tag_set(&ctrl->ctrl);
1032959
out_free_io_queues:
1033960
nvme_rdma_free_io_queues(ctrl);
1034961
return ret;
@@ -1041,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
1041968
blk_sync_queue(ctrl->ctrl.admin_q);
1042969
nvme_rdma_stop_queue(&ctrl->queues[0]);
1043970
nvme_cancel_admin_tagset(&ctrl->ctrl);
1044-
if (remove)
971+
if (remove) {
1045972
nvme_start_admin_queue(&ctrl->ctrl);
1046-
nvme_rdma_destroy_admin_queue(ctrl, remove);
973+
nvme_remove_admin_tag_set(&ctrl->ctrl);
974+
}
975+
nvme_rdma_destroy_admin_queue(ctrl);
1047976
}
1048977

1049978
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1055,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
1055984
nvme_sync_io_queues(&ctrl->ctrl);
1056985
nvme_rdma_stop_io_queues(ctrl);
1057986
nvme_cancel_tagset(&ctrl->ctrl);
1058-
if (remove)
987+
if (remove) {
1059988
nvme_start_queues(&ctrl->ctrl);
1060-
nvme_rdma_destroy_io_queues(ctrl, remove);
989+
nvme_remove_io_tag_set(&ctrl->ctrl);
990+
}
991+
nvme_rdma_free_io_queues(ctrl);
1061992
}
1062993
}
1063994

@@ -1179,14 +1110,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
11791110
nvme_sync_io_queues(&ctrl->ctrl);
11801111
nvme_rdma_stop_io_queues(ctrl);
11811112
nvme_cancel_tagset(&ctrl->ctrl);
1182-
nvme_rdma_destroy_io_queues(ctrl, new);
1113+
if (new)
1114+
nvme_remove_io_tag_set(&ctrl->ctrl);
1115+
nvme_rdma_free_io_queues(ctrl);
11831116
}
11841117
destroy_admin:
11851118
nvme_stop_admin_queue(&ctrl->ctrl);
11861119
blk_sync_queue(ctrl->ctrl.admin_q);
11871120
nvme_rdma_stop_queue(&ctrl->queues[0]);
11881121
nvme_cancel_admin_tagset(&ctrl->ctrl);
1189-
nvme_rdma_destroy_admin_queue(ctrl, new);
1122+
if (new)
1123+
nvme_remove_admin_tag_set(&ctrl->ctrl);
1124+
nvme_rdma_destroy_admin_queue(ctrl);
11901125
return ret;
11911126
}
11921127

0 commit comments

Comments
 (0)