@@ -788,64 +788,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	return ret;
 }
 
-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
 {
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
+	unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	set->flags = BLK_MQ_F_NO_SCHED;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.admin_tagset = set;
-	return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
+	if (ctrl->max_integrity_segments)
+		cmd_size += sizeof(struct nvme_rdma_sgl) +
+			    NVME_RDMA_METADATA_SGL_SIZE;
 
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	if (nctrl->max_integrity_segments)
-		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
-				 NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.tagset = set;
-	return ret;
+	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
 }
 
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
 {
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
-	}
 	if (ctrl->async_event_sqe.data) {
 		cancel_work_sync(&ctrl->ctrl.async_event_work);
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -887,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 		goto out_free_queue;
 
 	if (new) {
-		error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+				BLK_MQ_F_NO_SCHED,
+				sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE);
 		if (error)
 			goto out_free_async_qe;
 
-		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-			error = PTR_ERR(ctrl->ctrl.fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.admin_q)) {
-			error = PTR_ERR(ctrl->ctrl.admin_q);
-			goto out_cleanup_fabrics_q;
-		}
 	}
 
 	error = nvme_rdma_start_queue(ctrl, 0);
 	if (error)
-		goto out_cleanup_queue;
+		goto out_remove_admin_tag_set;
 
 	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
@@ -933,15 +883,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_stop_queue:
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
+out_remove_admin_tag_set:
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_async_qe:
 	if (ctrl->async_event_sqe.data) {
 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -953,16 +897,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	return error;
 }
 
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
-{
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
-	}
-	nvme_rdma_free_io_queues(ctrl);
-}
-
 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret, nr_queues;
@@ -975,10 +909,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 		ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
 		if (ret)
 			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-		if (ret)
-			goto out_free_tag_set;
 	}
 
 	/*
@@ -989,7 +919,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
 	ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
 	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;
 
 	if (!new) {
 		nvme_start_queues(&ctrl->ctrl);
@@ -1022,13 +952,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 	nvme_stop_queues(&ctrl->ctrl);
 	nvme_sync_io_queues(&ctrl->ctrl);
 	nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
 	nvme_cancel_tagset(&ctrl->ctrl);
 	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 out_free_io_queues:
 	nvme_rdma_free_io_queues(ctrl);
 	return ret;
@@ -1041,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	if (remove)
+	if (remove) {
 		nvme_start_admin_queue(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, remove);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	}
+	nvme_rdma_destroy_admin_queue(ctrl);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1055,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		if (remove)
+		if (remove) {
 			nvme_start_queues(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, remove);
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		}
+		nvme_rdma_free_io_queues(ctrl);
 	}
 }
 
@@ -1179,14 +1110,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		nvme_sync_io_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
 		nvme_cancel_tagset(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, new);
+		if (new)
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		nvme_rdma_free_io_queues(ctrl);
 	}
 destroy_admin:
 	nvme_stop_admin_queue(&ctrl->ctrl);
 	blk_sync_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
 	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, new);
+	if (new)
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	nvme_rdma_destroy_admin_queue(ctrl);
 	return ret;
 }
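
Note: the per-driver tag-set setup deleted above now lives in shared nvme core helpers. Their bodies are not part of this diff, so the following is only a sketch of what nvme_alloc_io_tag_set() plausibly does, reconstructed from the open-coded logic removed from nvme_rdma_alloc_tag_set() and nvme_rdma_configure_io_queues() and the five-argument call sites visible above; it is not the verbatim core implementation.

/*
 * Sketch (not verbatim): a consolidated I/O tag-set helper, inferred from
 * the driver code this commit deletes. The fields mirror the removed
 * assignments, with the transport-specific ops, flags, and cmd_size
 * passed in by the caller.
 */
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = ctrl->sqsize + 1;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->flags = flags;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = ctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	/* Fabrics transports need a connect queue; previously open-coded
	 * via nvme_ctrl_init_connect_q() in the driver. */
	ctrl->connect_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->connect_q)) {
		ret = PTR_ERR(ctrl->connect_q);
		blk_mq_free_tag_set(set);
		return ret;
	}

	ctrl->tagset = set;
	return 0;
}

On the same reading, nvme_remove_io_tag_set() would undo both steps (blk_mq_destroy_queue() on connect_q, then blk_mq_free_tag_set()), and the admin-side pair would likewise absorb the fabrics_q/admin_q setup and teardown removed from nvme_rdma_configure_admin_queue(), which is exactly the unwind code this diff drops from the error paths.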