@@ -1104,58 +1104,59 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 				 struct request *rq)
 {
 	struct blk_mq_hw_ctx *this_hctx = *hctx;
-	bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
 	struct sbq_wait_state *ws;
 	wait_queue_entry_t *wait;
 	bool ret;
 
-	if (!shared_tags) {
+	if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
 		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
 			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
-	} else {
-		wait = &this_hctx->dispatch_wait;
-		if (!list_empty_careful(&wait->entry))
-			return false;
 
-		spin_lock(&this_hctx->lock);
-		if (!list_empty(&wait->entry)) {
-			spin_unlock(&this_hctx->lock);
-			return false;
-		}
+		/*
+		 * It's possible that a tag was freed in the window between the
+		 * allocation failure and adding the hardware queue to the wait
+		 * queue.
+		 *
+		 * Don't clear RESTART here, someone else could have set it.
+		 * At most this will cost an extra queue run.
+		 */
+		return blk_mq_get_driver_tag(rq, hctx, false);
+	}
+
+	wait = &this_hctx->dispatch_wait;
+	if (!list_empty_careful(&wait->entry))
+		return false;
 
-		ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
-		add_wait_queue(&ws->wait, wait);
+	spin_lock(&this_hctx->lock);
+	if (!list_empty(&wait->entry)) {
+		spin_unlock(&this_hctx->lock);
+		return false;
 	}
 
+	ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
+	add_wait_queue(&ws->wait, wait);
+
 	/*
 	 * It's possible that a tag was freed in the window between the
 	 * allocation failure and adding the hardware queue to the wait
 	 * queue.
 	 */
 	ret = blk_mq_get_driver_tag(rq, hctx, false);
-
-	if (!shared_tags) {
-		/*
-		 * Don't clear RESTART here, someone else could have set it.
-		 * At most this will cost an extra queue run.
-		 */
-		return ret;
-	} else {
-		if (!ret) {
-			spin_unlock(&this_hctx->lock);
-			return false;
-		}
-
-		/*
-		 * We got a tag, remove ourselves from the wait queue to ensure
-		 * someone else gets the wakeup.
-		 */
-		spin_lock_irq(&ws->wait.lock);
-		list_del_init(&wait->entry);
-		spin_unlock_irq(&ws->wait.lock);
+	if (!ret) {
 		spin_unlock(&this_hctx->lock);
-		return true;
+		return false;
 	}
+
+	/*
+	 * We got a tag, remove ourselves from the wait queue to ensure
+	 * someone else gets the wakeup.
+	 */
+	spin_lock_irq(&ws->wait.lock);
+	list_del_init(&wait->entry);
+	spin_unlock_irq(&ws->wait.lock);
+	spin_unlock(&this_hctx->lock);
+
+	return true;
 }
 
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
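
For context, blk_mq_mark_tag_wait() is invoked from the dispatch path when a driver tag cannot be allocated; the change above drops the shared_tags local and turns the non-shared case into an early return, so the locked shared-tag wait path reads straight-line. The fragment below is a rough, assumed sketch of that caller pattern, not the exact blk_mq_dispatch_rq_list() body from this tree: the loop shape and the driver hand-off are simplifications, and only blk_mq_get_driver_tag(), blk_mq_mark_tag_wait(), and their return-value semantics are taken from the hunk itself.

	/* Assumed caller sketch, simplified from the real dispatch loop. */
	while (!list_empty(list)) {
		struct request *rq = list_first_entry(list, struct request, queuelist);

		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			/*
			 * No driver tag. For shared tags this parks the hctx on
			 * the tag waitqueue (or picks up a tag that was freed in
			 * the meantime); for non-shared tags it sets
			 * BLK_MQ_S_SCHED_RESTART and retries the allocation once.
			 * A false return means we still have no tag, so stop
			 * dispatching and let the wakeup/restart re-run the queue.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq))
				break;
		}

		/* ... hand rq to the driver via ->queue_rq() ... */
	}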