@@ -465,7 +465,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_tcp_cmd_pdu *pdu;
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
@@ -489,7 +489,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	hctx->driver_data = queue;
@@ -499,7 +499,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_tcp_ctrl *ctrl = data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
 	struct nvme_tcp_queue *queue = &ctrl->queues[0];
 
 	hctx->driver_data = queue;
@@ -1700,7 +1700,7 @@ static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->flags = BLK_MQ_F_BLOCKING;
 	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = 1;
 	set->timeout = NVME_ADMIN_TIMEOUT;
 	ret = blk_mq_alloc_tag_set(set);
@@ -1722,7 +1722,7 @@ static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
 	set->cmd_size = sizeof(struct nvme_tcp_request);
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = nctrl->queue_count - 1;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
@@ -2486,7 +2486,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_tcp_ctrl *ctrl = set->driver_data;
+	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
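Note: with this change, set->driver_data stores the embedded generic controller (&ctrl->ctrl, a struct nvme_ctrl *) instead of the struct nvme_tcp_ctrl itself, so every consumer of driver_data has to convert back through to_tcp_ctrl(). That helper is presumably the usual container_of() wrapper already present in drivers/nvme/host/tcp.c; a minimal sketch, assuming that definition:

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
	/* Recover the containing nvme_tcp_ctrl from its embedded nvme_ctrl member. */
	return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}

container_of() resolves to a compile-time offset subtraction, so the round trip through the generic pointer adds no runtime cost, while code that only sees set->driver_data can now treat it uniformly as a struct nvme_ctrl *.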