@@ -159,16 +159,17 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			       struct request *rq, unsigned int rw_flags)
+			       struct request *rq, int op,
+			       unsigned int op_flags)
 {
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
-	rq->cmd_flags |= rw_flags;
+	req_set_op_attrs(rq, op, op_flags);
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -203,11 +204,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
+	ctx->rq_dispatched[rw_is_sync(op | op_flags)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -222,7 +223,7 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
 		return rq;
 	}
 
@@ -246,15 +247,15 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
@@ -1169,28 +1170,29 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	int rw = bio_data_dir(bio);
+	int op = bio_data_dir(bio);
+	int op_flags = 0;
 	struct blk_mq_alloc_data alloc_data;
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
 	if (rw_is_sync(bio->bi_rw))
-		rw |= REQ_SYNC;
+		op_flags |= REQ_SYNC;
 
-	trace_block_getrq(q, bio, rw);
+	trace_block_getrq(q, bio, op);
 	blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw);
+	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 	if (unlikely(!rq)) {
 		__blk_mq_run_hw_queue(hctx);
 		blk_mq_put_ctx(ctx);
-		trace_block_sleeprq(q, bio, rw);
+		trace_block_sleeprq(q, bio, op);
 
 		ctx = blk_mq_get_ctx(q);
 		hctx = q->mq_ops->map_queue(q, ctx->cpu);
 		blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-		rq = __blk_mq_alloc_request(&alloc_data, rw);
+		rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 		ctx = alloc_data.ctx;
 		hctx = alloc_data.hctx;
 	}
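Note on the interface change shown above: the old code carried the data direction and the request attribute flags in one bitmask (rw_flags), while the new code passes them as separate op and op_flags arguments, stores them into the request via req_set_op_attrs(), and only recombines them where the old combined value is still expected (rw_is_sync(op | op_flags)); callers that have no extra flags, like blk_mq_alloc_request(), simply pass 0. The standalone sketch below models that split with made-up mask values and helper names — it is not the kernel's definition of req_set_op_attrs(), just an illustration of why OR-ing op and op_flags back together preserves the old behaviour.

/*
 * Standalone model of the op/op_flags split, with illustrative
 * (non-kernel) mask values and helper names.
 */
#include <stdio.h>

#define MODEL_OP_READ     0u          /* data direction, as bio_data_dir() would return */
#define MODEL_OP_WRITE    1u
#define MODEL_REQ_SYNC    (1u << 4)   /* hypothetical attribute flag */
#define MODEL_REQ_IO_STAT (1u << 5)   /* hypothetical attribute flag */

struct model_request {
	unsigned int cmd_flags;       /* op and flags end up in one word */
};

/* Analogous in spirit to req_set_op_attrs(rq, op, op_flags): record both
 * the operation and its attribute flags on the request. */
static void model_set_op_attrs(struct model_request *rq,
			       unsigned int op, unsigned int op_flags)
{
	rq->cmd_flags |= op | op_flags;
}

/* Old-style check that still expects the combined value, in the spirit of
 * rw_is_sync(): a request counts as sync if it is a read or carries SYNC. */
static int model_rw_is_sync(unsigned int combined)
{
	return (combined & MODEL_OP_WRITE) == 0 || (combined & MODEL_REQ_SYNC) != 0;
}

int main(void)
{
	struct model_request rq = { 0 };
	unsigned int op = MODEL_OP_WRITE;       /* caller chooses the direction...   */
	unsigned int op_flags = MODEL_REQ_SYNC; /* ...and the attributes separately  */

	model_set_op_attrs(&rq, op, op_flags);

	/* OR-ing op and op_flags back together reproduces the single value the
	 * old rw_flags-based interface would have passed around. */
	printf("cmd_flags=0x%x sync=%d\n",
	       rq.cmd_flags, model_rw_is_sync(op | op_flags));
	return 0;
}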