@@ -2383,20 +2383,16 @@ nvme_fc_ctrl_free(struct kref *ref)
 		container_of(ref, struct nvme_fc_ctrl, ref);
 	unsigned long flags;
 
-	if (ctrl->ctrl.tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (ctrl->ctrl.tagset)
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 
 	/* remove from rport list */
 	spin_lock_irqsave(&ctrl->rport->lock, flags);
 	list_del(&ctrl->ctrl_list);
 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
 	nvme_start_admin_queue(&ctrl->ctrl);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 
 	kfree(ctrl->queues);
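For context: judging from the open-coded teardown deleted above, the two removal helpers consolidate queue destruction and tag set release in the nvme core. A minimal sketch, assuming the helpers mirror exactly what this hunk removes (the actual drivers/nvme/host/core.c bodies may differ in detail):

void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
{
	/* tear down the fabrics connect queue, then the I/O tag set */
	blk_mq_destroy_queue(ctrl->connect_q);
	blk_mq_free_tag_set(ctrl->tagset);
}

void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
{
	/* the admin and fabrics queues share the admin tag set */
	blk_mq_destroy_queue(ctrl->admin_q);
	blk_mq_destroy_queue(ctrl->fabrics_q);
	blk_mq_free_tag_set(ctrl->admin_tagset);
}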
@@ -2906,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 	nvme_fc_init_io_queues(ctrl);
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_fc_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = &ctrl->ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		return ret;
 
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-	if (ret)
-		goto out_free_tag_set;
-
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
-		goto out_cleanup_blk_queue;
+		goto out_cleanup_tagset;
 
 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
@@ -2943,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 	nvme_fc_free_io_queues(ctrl);
 
 	/* force put free routine to ignore io queues */
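The per-driver boilerplate removed above (the memset, the field-by-field tag set setup, and the connect_q creation) moves into a shared core helper. A hedged sketch of nvme_alloc_io_tag_set follows, with the signature taken from the call site in this diff and the body reconstructed from the fields the deleted nvme-fc code used to populate; treat it as an approximation of the core helper, not its verbatim source:

int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = ctrl->opts->queue_size;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->flags = flags;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = ctrl->queue_count - 1;
	set->timeout = NVME_IO_TIMEOUT;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	/* the connect_q setup that callers used to open-code */
	ctrl->connect_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->connect_q)) {
		ret = PTR_ERR(ctrl->connect_q);
		blk_mq_free_tag_set(set);
		return ret;
	}

	ctrl->tagset = set;
	return 0;
}

Because the helper now owns both the tag set and connect_q, the two error labels out_cleanup_blk_queue and out_free_tag_set collapse into the single out_cleanup_tagset seen above, which unwinds everything via nvme_remove_io_tag_set().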
@@ -3530,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	nvme_fc_init_queue(ctrl, 0);
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size =
-		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		goto out_free_queues;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_admin_tag_set;
-	}
-
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		ret = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 
 	/*
 	 * Would have been nice to init io queues tag set as well.
@@ -3569,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
 	if (ret)
-		goto out_cleanup_admin_q;
+		goto out_cleanup_tagset;
 
 	/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3624,12 +3579,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	return ERR_PTR(-EIO);
 
-out_cleanup_admin_q:
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
 out_free_ida:
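Likewise for the admin side: the deleted setup of admin_tag_set, fabrics_q, and admin_q suggests the shape of the shared helper, which is why three error labels collapse into one out_cleanup_tagset. A sketch under the same assumption, i.e. that the helper simply folds in what this diff removes (field order and error handling here follow the deleted nvme-fc code, not necessarily the core source):

int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;
	set->nr_hw_queues = 1;
	set->timeout = NVME_ADMIN_TIMEOUT;
	set->flags = flags;
	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;
	ctrl->admin_tagset = set;

	/* both queues are carved out of the single admin tag set */
	ctrl->fabrics_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->fabrics_q)) {
		ret = PTR_ERR(ctrl->fabrics_q);
		goto out_free_tag_set;
	}

	ctrl->admin_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->admin_q)) {
		ret = PTR_ERR(ctrl->admin_q);
		goto out_cleanup_fabrics_q;
	}
	return 0;

out_cleanup_fabrics_q:
	blk_mq_destroy_queue(ctrl->fabrics_q);
out_free_tag_set:
	blk_mq_free_tag_set(set);
	return ret;
}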