@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(set->driver_data, op);
+	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
 }
 
 static int
@@ -2135,7 +2135,7 @@ static int
 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	}
 }
 
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
-		unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
 {
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
 
 	hctx->driver_data = queue;
 	queue->hctx = hctx;
+	return 0;
 }
 
 static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-		unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
 }
 
 static int
 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
 }
 
 static void
@@ -2862,7 +2854,7 @@ nvme_fc_complete_rq(struct request *rq)
 
 static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	int i;
 
 	for (i = 0; i < set->nr_maps; i++) {
@@ -2923,7 +2915,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 	ctrl->tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
 			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.driver_data = &ctrl->ctrl;
 	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
 	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
@@ -3546,7 +3538,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->admin_tag_set.cmd_size =
 		struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
 			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = ctrl;
+	ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
 	ctrl->admin_tag_set.nr_hw_queues = 1;
 	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
 	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;