@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
         struct request_queue *q = rl->q;
-        int sync = rw_is_sync(flags);
+        int sync = rw_is_sync(op | flags);

         q->nr_rqs[sync]--;
         rl->count[sync]--;
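
The sync/async accounting bucket is still chosen by rw_is_sync(), now fed the OR of the new op and the remaining flags. Below is a minimal user-space sketch of that classification, assuming this era's rw_is_sync() semantics (a read, or a write tagged REQ_SYNC, counts as synchronous); the constants are illustrative stand-ins, not the kernel's definitions.

#include <assert.h>
#include <stdbool.h>

#define SK_OP_WRITE  (1u << 0)   /* stand-in for the write direction bit */
#define SK_REQ_SYNC  (1u << 4)   /* stand-in for REQ_SYNC */

/* sync if it is a read, or a write explicitly tagged sync */
static bool sk_rw_is_sync(unsigned int rw)
{
        return !(rw & SK_OP_WRITE) || (rw & SK_REQ_SYNC);
}

int main(void)
{
        assert(sk_rw_is_sync(0));                           /* read */
        assert(!sk_rw_is_sync(SK_OP_WRITE));                /* plain write */
        assert(sk_rw_is_sync(SK_OP_WRITE | SK_REQ_SYNC));   /* sync write */
        return 0;
}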
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-                                     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+                                     int op_flags, struct bio *bio,
+                                     gfp_t gfp_mask)
 {
         struct request_queue *q = rl->q;
         struct request *rq;
         struct elevator_type *et = q->elevator->type;
         struct io_context *ioc = rq_ioc(bio);
         struct io_cq *icq = NULL;
-        const bool is_sync = rw_is_sync(rw_flags) != 0;
+        const bool is_sync = rw_is_sync(op | op_flags) != 0;
         int may_queue;

         if (unlikely(blk_queue_dying(q)))
                 return ERR_PTR(-ENODEV);

-        may_queue = elv_may_queue(q, rw_flags);
+        may_queue = elv_may_queue(q, op | op_flags);
         if (may_queue == ELV_MQUEUE_NO)
                 goto rq_starved;

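
What was a single rw_flags word is now an operation plus modifier flags, while helpers such as rw_is_sync() and elv_may_queue() still expect one combined value, so the callers OR the two halves back together. A small sketch of that split-and-recombine invariant, using stand-in constants; it only holds as long as the write op carries the same bit value as the old direction bit, which is what this transitional patch relies on.

#include <assert.h>

#define SK_OP_MASK   0x1u        /* stand-in: read = 0, write = 1 */
#define SK_REQ_SYNC  (1u << 4)   /* stand-in modifier flag */

int main(void)
{
        unsigned int old_rw_flags = SK_OP_MASK | SK_REQ_SYNC;  /* sync write */

        /* the split performed by the patch ... */
        unsigned int op       = old_rw_flags & SK_OP_MASK;
        unsigned int op_flags = old_rw_flags & ~SK_OP_MASK;

        /* ... and the recombination used with the old-style helpers */
        assert((op | op_flags) == old_rw_flags);
        return 0;
}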
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,

         /*
          * Decide whether the new request will be managed by elevator. If
-         * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
+         * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
          * prevent the current elevator from being destroyed until the new
          * request is freed. This guarantees icq's won't be destroyed and
          * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
          * it will be created after releasing queue_lock.
          */
         if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-                rw_flags |= REQ_ELVPRIV;
+                op_flags |= REQ_ELVPRIV;
                 q->nr_rqs_elvpriv++;
                 if (et->icq_cache && ioc)
                         icq = ioc_lookup_icq(ioc, q);
         }

         if (blk_queue_io_stat(q))
-                rw_flags |= REQ_IO_STAT;
+                op_flags |= REQ_IO_STAT;
         spin_unlock_irq(q->queue_lock);

         /* allocate and init request */
@@ -1149,10 +1151,10 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,

         blk_rq_init(q, rq);
         blk_rq_set_rl(rq, rl);
-        rq->cmd_flags = rw_flags | REQ_ALLOCED;
+        req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);

         /* init elvpriv */
-        if (rw_flags & REQ_ELVPRIV) {
+        if (op_flags & REQ_ELVPRIV) {
                 if (unlikely(et->icq_cache && !icq)) {
                         if (ioc)
                                 icq = ioc_create_icq(ioc, q, gfp_mask);
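
Instead of OR-ing the direction straight into rq->cmd_flags, the request's operation is now recorded through req_set_op_attrs() and later read back with req_op(). The sketch below shows the pack/unpack idea with the op kept in its own bit range; the struct, shift, and width here are assumptions for illustration, not the kernel's actual layout.

#include <assert.h>
#include <stdint.h>

#define SK_OP_SHIFT  60
#define SK_OP_MASK   (0xfULL << SK_OP_SHIFT)

struct sk_request {
        uint64_t cmd_flags;
};

/* rough analogue of req_set_op_attrs(): store the op, OR in the flags */
static void sk_set_op_attrs(struct sk_request *rq, unsigned int op,
                            uint64_t flags)
{
        rq->cmd_flags &= ~SK_OP_MASK;
        rq->cmd_flags |= ((uint64_t)op << SK_OP_SHIFT) | flags;
}

/* rough analogue of req_op(): recover the operation */
static unsigned int sk_req_op(const struct sk_request *rq)
{
        return rq->cmd_flags >> SK_OP_SHIFT;
}

int main(void)
{
        struct sk_request rq = { 0 };

        sk_set_op_attrs(&rq, 1 /* write */, 1u << 4 /* some modifier */);
        assert(sk_req_op(&rq) == 1);
        return 0;
}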
@@ -1178,7 +1180,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         if (ioc_batching(q, ioc))
                 ioc->nr_batch_requests--;

-        trace_block_getrq(q, bio, rw_flags & 1);
+        trace_block_getrq(q, bio, op);
         return rq;

 fail_elvpriv:
@@ -1208,7 +1210,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
          * queue, but this is pretty rare.
          */
         spin_lock_irq(q->queue_lock);
-        freed_request(rl, rw_flags);
+        freed_request(rl, op, op_flags);

         /*
          * in the very unlikely event that allocation failed and no
@@ -1226,7 +1228,8 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1240,18 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-                                   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+                                   int op_flags, struct bio *bio,
+                                   gfp_t gfp_mask)
 {
-        const bool is_sync = rw_is_sync(rw_flags) != 0;
+        const bool is_sync = rw_is_sync(op | op_flags) != 0;
         DEFINE_WAIT(wait);
         struct request_list *rl;
         struct request *rq;

         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
-        rq = __get_request(rl, rw_flags, bio, gfp_mask);
+        rq = __get_request(rl, op, op_flags, bio, gfp_mask);
         if (!IS_ERR(rq))
                 return rq;

@@ -1260,7 +1264,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                   TASK_UNINTERRUPTIBLE);

-        trace_block_sleeprq(q, bio, rw_flags & 1);
+        trace_block_sleeprq(q, bio, op);

         spin_unlock_irq(q->queue_lock);
         io_schedule();
@@ -1289,7 +1293,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
         create_io_context(gfp_mask, q->node);

         spin_lock_irq(q->queue_lock);
-        rq = get_request(q, rw, NULL, gfp_mask);
+        rq = get_request(q, rw, 0, NULL, gfp_mask);
         if (IS_ERR(rq))
                 spin_unlock_irq(q->queue_lock);
         /* q->queue_lock is unlocked at this point */
@@ -1491,13 +1495,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
          */
         if (req->cmd_flags & REQ_ALLOCED) {
                 unsigned int flags = req->cmd_flags;
+                int op = req_op(req);
                 struct request_list *rl = blk_rq_rl(req);

                 BUG_ON(!list_empty(&req->queuelist));
                 BUG_ON(ELV_ON_HASH(req));

                 blk_free_request(rl, req);
-                freed_request(rl, flags);
+                freed_request(rl, op, flags);
                 blk_put_rl(rl);
         }
 }
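
On the free side, __blk_put_request() now recovers the operation with req_op() so that freed_request() can decrement the same sync/async bucket that __get_request() incremented at allocation time. A toy model of that accounting symmetry, with made-up bit values:

#include <assert.h>
#include <stdbool.h>

static int count[2];                         /* [0] = async, [1] = sync */

/* read, or a write carrying the (stand-in) sync bit */
static bool sk_is_sync(unsigned int op, unsigned int flags)
{
        return !(op & 1) || (flags & (1u << 4));
}

static void sk_get_request(unsigned int op, unsigned int flags)
{
        count[sk_is_sync(op, flags)]++;
}

static void sk_put_request(unsigned int op, unsigned int flags)
{
        count[sk_is_sync(op, flags)]--;      /* must pick the same bucket */
}

int main(void)
{
        sk_get_request(1, 0);                /* plain write -> async bucket */
        sk_put_request(1, 0);
        assert(count[0] == 0 && count[1] == 0);
        return 0;
}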
@@ -1712,7 +1717,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
         const bool sync = !!(bio->bi_rw & REQ_SYNC);
         struct blk_plug *plug;
-        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+        int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
         struct request *req;
         unsigned int request_count = 0;

@@ -1772,15 +1777,14 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          * but we need to set it earlier to expose the sync flag to the
          * rq allocator and io schedulers.
          */
-        rw_flags = bio_data_dir(bio);
         if (sync)
                 rw_flags |= REQ_SYNC;

         /*
          * Grab a free request. This is might sleep but can not fail.
          * Returns with the queue unlocked.
          */
-        req = get_request(q, rw_flags, bio, GFP_NOIO);
+        req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
         if (IS_ERR(req)) {
                 bio->bi_error = PTR_ERR(req);
                 bio_endio(bio);
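
blk_queue_bio() no longer folds the data direction into rw_flags; the direction comes from bio_data_dir(bio) at the get_request() call site, and rw_flags keeps only modifier bits such as REQ_SYNC. A toy version of that call-site computation, assuming bio_data_dir()-style semantics (0 = read, 1 = write) and a stand-in sync bit:

#include <assert.h>

#define SK_SYNC  (1u << 4)                   /* stand-in for REQ_SYNC */

struct sk_bio {
        unsigned long bi_rw;
};

/* bio_data_dir()-like: the low bit is the data direction */
static unsigned int sk_bio_data_dir(const struct sk_bio *bio)
{
        return bio->bi_rw & 1;
}

int main(void)
{
        struct sk_bio bio = { .bi_rw = 1 | SK_SYNC };   /* sync write */
        unsigned int rw_flags = 0;

        if (bio.bi_rw & SK_SYNC)
                rw_flags |= SK_SYNC;

        /* would become: get_request(q, sk_bio_data_dir(&bio), rw_flags, &bio, GFP_NOIO) */
        assert(sk_bio_data_dir(&bio) == 1);
        assert(rw_flags == SK_SYNC);
        return 0;
}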