@@ -295,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 		struct request *rq, unsigned int hctx_idx,
 		unsigned int numa_node)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -320,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -332,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_rdma_ctrl *ctrl = data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
 	struct nvme_rdma_queue *queue = &ctrl->queues[0];
 
 	BUG_ON(hctx_idx != 0);
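
The three hunks above stop treating the opaque driver_data pointer as a
struct nvme_rdma_ctrl * directly and instead go through to_rdma_ctrl(). A
minimal sketch of that helper, assuming it is the usual container_of()
accessor and that struct nvme_rdma_ctrl embeds the generic controller as its
ctrl member (which the &ctrl->ctrl assignments in the hunks below imply):

/* Sketch only: recover the RDMA controller from the embedded generic
 * struct nvme_ctrl that is now stored in set->driver_data.
 */
static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}
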
@@ -801,7 +801,7 @@ static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
 	set->numa_node = nctrl->numa_node;
 	set->cmd_size = sizeof(struct nvme_rdma_request) +
 			NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = 1;
 	set->timeout = NVME_ADMIN_TIMEOUT;
 	set->flags = BLK_MQ_F_NO_SCHED;
@@ -828,7 +828,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
 	if (nctrl->max_integrity_segments)
 		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
 				NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = ctrl;
+	set->driver_data = &ctrl->ctrl;
 	set->nr_hw_queues = nctrl->queue_count - 1;
 	set->timeout = NVME_IO_TIMEOUT;
 	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
@@ -2206,7 +2206,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 
 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_rdma_ctrl *ctrl = set->driver_data;
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 
 	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
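
Taken together: the alloc_tag_set hunks store the generic controller
(set->driver_data = &ctrl->ctrl) and every reader converts back with
to_rdma_ctrl(), so driver_data always holds the transport-neutral type. A
self-contained round-trip sketch of that pattern; the struct layouts and
field names here are hypothetical stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct nvme_ctrl { int instance; };

struct nvme_rdma_ctrl {
	long rdma_state;                /* transport-private data */
	struct nvme_ctrl ctrl;          /* generic controller embedded here */
};

/* Same idea as the kernel's container_of(): step back from a member
 * pointer to the struct that contains it.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

int main(void)
{
	struct nvme_rdma_ctrl rdma = {
		.rdma_state = 42,
		.ctrl = { .instance = 7 },
	};

	/* What the alloc_tag_set hunks now store... */
	struct nvme_ctrl *driver_data = &rdma.ctrl;

	/* ...and what init_request/init_hctx/map_queues recover. */
	struct nvme_rdma_ctrl *got = to_rdma_ctrl(driver_data);

	printf("instance=%d rdma_state=%ld\n",
	       got->ctrl.instance, got->rdma_state);
	return 0;
}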