@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+					   unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+	/*
+	 * Revalidating a dead namespace sets capacity to 0. This will end
+	 * buffered writers dirtying pages that can't be synced.
+	 */
+	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		return;
+	revalidate_disk(ns->disk);
+	blk_set_queue_dying(ns->queue);
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
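The new helper leans on test_and_set_bit() for idempotence: whichever path reaches a namespace first (the passthru revalidation below, or nvme_kill_queues()) performs the teardown, and every later caller bails out at the flag check. A minimal userspace sketch of that single-shot guard, with illustrative toy names and C11 atomics standing in for the kernel's bitops:

```c
#include <stdatomic.h>
#include <stdio.h>

#define TOY_NS_DEAD 0	/* bit index, mirroring NVME_NS_DEAD */

struct toy_ns {
	atomic_ulong flags;
};

/* Only the caller that flips the bit performs the teardown; everyone
 * after that returns at the guard, so the helper is safe to call from
 * several shutdown paths at once. */
static void toy_set_dying(struct toy_ns *ns)
{
	unsigned long bit = 1UL << TOY_NS_DEAD;

	if (atomic_fetch_or(&ns->flags, bit) & bit)
		return;			/* someone else owns the teardown */
	puts("tearing down once");	/* revalidate + mark queue dying here */
}

int main(void)
{
	struct toy_ns ns = { 0 };

	toy_set_dying(&ns);	/* runs the teardown */
	toy_set_dying(&ns);	/* no-op: DEAD bit already set */
	return 0;
}
```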
@@ -1151,19 +1167,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns, *next;
-	LIST_HEAD(rm_list);
+	struct nvme_ns *ns;
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-			list_move_tail(&ns->list, &rm_list);
-		}
-	}
-	up_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (ns->disk && nvme_revalidate_disk(ns->disk))
+			nvme_set_queue_dying(ns);
+	up_read(&ctrl->namespaces_rwsem);
 
-	list_for_each_entry_safe(ns, next, &rm_list, list)
-		nvme_ns_remove(ns);
+	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
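This hunk turns nvme_update_formats() into a two-phase, mark-then-reap shape: revalidation (which can issue I/O) now runs with namespaces_rwsem held only for reading, marking failures via nvme_set_queue_dying(); the actual list surgery is deferred to nvme_remove_invalid_namespaces(), which takes the lock exclusively (see the next hunk). A hedged standalone sketch of the same pattern, assuming a plain pthread rwlock and a toy intrusive list in place of the kernel's list API:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ns {
	struct toy_ns *next;
	bool dead;
};

struct toy_ctrl {
	pthread_rwlock_t ns_rwsem;
	struct toy_ns *namespaces;
};

/* Phase 1: hold the lock shared; per-entry validation may be slow, so
 * no exclusive lock here. Entries are only marked, never unlinked. */
static void toy_mark_invalid(struct toy_ctrl *ctrl,
			     bool (*valid)(struct toy_ns *))
{
	pthread_rwlock_rdlock(&ctrl->ns_rwsem);
	for (struct toy_ns *ns = ctrl->namespaces; ns; ns = ns->next)
		if (!valid(ns))
			ns->dead = true;
	pthread_rwlock_unlock(&ctrl->ns_rwsem);
}

/* Phase 2: take the lock exclusive only for the quick list surgery. */
static void toy_reap_dead(struct toy_ctrl *ctrl)
{
	pthread_rwlock_wrlock(&ctrl->ns_rwsem);
	for (struct toy_ns **pp = &ctrl->namespaces; *pp; ) {
		struct toy_ns *ns = *pp;
		if (ns->dead) {
			*pp = ns->next;
			free(ns);
		} else {
			pp = &ns->next;
		}
	}
	pthread_rwlock_unlock(&ctrl->ns_rwsem);
}

static bool toy_never_valid(struct toy_ns *ns) { (void)ns; return false; }

int main(void)
{
	struct toy_ctrl ctrl = { PTHREAD_RWLOCK_INITIALIZER, NULL };
	struct toy_ns *ns = calloc(1, sizeof(*ns));

	ns->next = ctrl.namespaces;
	ctrl.namespaces = ns;
	toy_mark_invalid(&ctrl, toy_never_valid);	/* marks the entry dead */
	toy_reap_dead(&ctrl);				/* unlinks and frees it */
	printf("list empty: %d\n", ctrl.namespaces == NULL);
	return 0;
}
```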
@@ -3138,7 +3150,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
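With the added test_bit() clause, one reaper serves both callers: the scan path passes the highest reported NSID so everything above it is removed, while nvme_update_formats() passes NVME_NSID_ALL (0xffffffff in the NVMe headers), which makes `ns_id > nsid` false for every valid namespace, so only DEAD entries are collected. A standalone illustration of the widened predicate, toy names only:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NSID_ALL 0xffffffffu	/* mirrors NVME_NSID_ALL */

/* Reap an entry if its ID is past the scan cutoff *or* it was marked
 * dead by an earlier revalidation pass. */
static bool toy_should_remove(uint32_t ns_id, bool dead, uint32_t cutoff)
{
	return ns_id > cutoff || dead;
}

int main(void)
{
	/* Scan path: cutoff 4, so NSID 5 is removed regardless of state. */
	printf("%d\n", toy_should_remove(5, false, 4));		/* 1 */
	/* Passthru path: cutoff TOY_NSID_ALL, live entries survive... */
	printf("%d\n", toy_should_remove(5, false, TOY_NSID_ALL)); /* 0 */
	/* ...and only dead ones are reaped. */
	printf("%d\n", toy_should_remove(5, true, TOY_NSID_ALL));  /* 1 */
	return 0;
}
```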
@@ -3542,19 +3554,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
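nvme_kill_queues() now reuses the helper, including its trailing blk_mq_unquiesce_queue(): marking a queue dying only helps if blocked submitters actually wake up to observe it. A userspace analogue of that dying-then-unquiesce ordering, using a toy bounded queue (all names illustrative, condition variables in place of blk-mq quiescing):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool quiesced;		/* dispatch paused, submitters must wait */
	bool dying;		/* new work is refused */
	int depth, max_depth;
};

/* Submission blocks while the queue is quiesced or full, unless the
 * queue is dying, in which case it fails immediately. */
static int toy_submit(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (!q->dying && (q->quiesced || q->depth == q->max_depth))
		pthread_cond_wait(&q->cond, &q->lock);
	if (q->dying) {
		pthread_mutex_unlock(&q->lock);
		return -1;	/* fail fast, like I/O on a dying queue */
	}
	q->depth++;
	pthread_mutex_unlock(&q->lock);
	return 0;
}

/* Teardown: set the dying flag first, then wake every waiter; compare
 * blk_set_queue_dying() followed by blk_mq_unquiesce_queue(). Setting
 * the flag alone would leave submitters parked on a quiesced queue
 * sleeping forever instead of seeing the failure. */
static void toy_kill(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->dying = true;
	pthread_cond_broadcast(&q->cond);
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
		.quiesced = true, .max_depth = 4,
	};

	toy_kill(&q);
	printf("submit after kill: %d\n", toy_submit(&q));	/* -1 */
	return 0;
}
```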