@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                           unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+        /*
+         * Revalidating a dead namespace sets capacity to 0. This will end
+         * buffered writers dirtying pages that can't be synced.
+         */
+        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+                return;
+        revalidate_disk(ns->disk);
+        blk_set_queue_dying(ns->queue);
+        /* Forcibly unquiesce queues to avoid blocking dispatch */
+        blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
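
The new helper consolidates the dead-namespace teardown that nvme_kill_queues() previously open-coded (last hunk below). The load-bearing detail is test_and_set_bit(): only the caller that actually flips NVME_NS_DEAD performs the revalidate/dying/unquiesce sequence, so the helper is safe to call from more than one path. A minimal user-space sketch of that claim-once pattern, using a C11 atomic in place of the kernel bitop (all names here are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool ns_dead;     /* stands in for the NVME_NS_DEAD bit */

static void set_queue_dying(const char *caller)
{
        /* Like test_and_set_bit(): returns the previous value, so only
         * the caller that actually flipped the flag does the teardown. */
        if (atomic_exchange(&ns_dead, true))
                return;
        printf("%s: revalidate disk, mark queue dying, unquiesce\n", caller);
}

int main(void)
{
        set_queue_dying("nvme_update_formats");  /* runs the teardown */
        set_queue_dying("nvme_kill_queues");     /* no-op: already dead */
        return 0;
}
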
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-        u32 result;
+        u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
         int status;
 
-        status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-                        ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+        if (!supported_aens)
+                return;
+
+        status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+                        NULL, 0, &result);
         if (status)
                 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-                         ctrl->oaes & NVME_AEN_SUPPORTED);
+                         supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
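
Besides folding the repeated `ctrl->oaes & NVME_AEN_SUPPORTED` expression into one variable, this hunk stops issuing a Set Features (Asynchronous Event Configuration) command when the controller advertises none of the event types the driver supports, since there is nothing to enable. A sketch of the guard; the bit positions are an assumption based on the NVMe spec's OAES layout (notice types in bits 8 and 9), not something this diff defines:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: Namespace Attribute Notices = bit 8,
 * Firmware Activation Notices = bit 9 of the OAES field. */
#define AEN_CFG_NS_ATTR  (1u << 8)
#define AEN_CFG_FW_ACT   (1u << 9)
#define AEN_SUPPORTED    (AEN_CFG_NS_ATTR | AEN_CFG_FW_ACT)

static void enable_aen(uint32_t oaes)
{
        uint32_t supported_aens = oaes & AEN_SUPPORTED;

        if (!supported_aens) {  /* the early return added by the patch */
                printf("oaes=%#x: nothing to enable, skip Set Features\n", oaes);
                return;
        }
        printf("oaes=%#x: issue Set Features with cfg %#x\n", oaes, supported_aens);
}

int main(void)
{
        enable_aen(0);          /* controller offers no optional AENs */
        enable_aen(0x300);      /* offers both notice types */
        return 0;
}
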
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-        struct nvme_ns *ns, *next;
-        LIST_HEAD(rm_list);
+        struct nvme_ns *ns;
 
-        down_write(&ctrl->namespaces_rwsem);
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
-                if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                        list_move_tail(&ns->list, &rm_list);
-                }
-        }
-        up_write(&ctrl->namespaces_rwsem);
+        down_read(&ctrl->namespaces_rwsem);
+        list_for_each_entry(ns, &ctrl->namespaces, list)
+                if (ns->disk && nvme_revalidate_disk(ns->disk))
+                        nvme_set_queue_dying(ns);
+        up_read(&ctrl->namespaces_rwsem);
 
-        list_for_each_entry_safe(ns, next, &rm_list, list)
-                nvme_ns_remove(ns);
+        nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
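
The locking discipline changes here: instead of holding namespaces_rwsem for write while revalidating and splicing failed namespaces onto a private list, revalidation now runs under the read lock and failures are only flagged via nvme_set_queue_dying(); the structural removal is deferred to nvme_remove_invalid_namespaces(), which takes the write lock itself. That shortens the exclusive section and routes all dead-namespace cleanup through one mechanism. A user-space sketch of the mark-then-reap split with a POSIX rwlock (data structures are illustrative; the kernel sets the flag with an atomic bitop, so doing it under the shared lock is safe there):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ns { int id; bool valid; bool dead; };

static pthread_rwlock_t nslock = PTHREAD_RWLOCK_INITIALIZER;
static struct ns namespaces[] = { {1, true}, {2, false}, {3, true} };
#define NR_NS (sizeof(namespaces) / sizeof(namespaces[0]))

static void update_formats(void)
{
        /* Phase 1: shared lock; readers may run concurrently, we only flag. */
        pthread_rwlock_rdlock(&nslock);
        for (unsigned i = 0; i < NR_NS; i++)
                if (!namespaces[i].valid)       /* "revalidate failed" */
                        namespaces[i].dead = true;
        pthread_rwlock_unlock(&nslock);

        /* Phase 2: exclusive lock only for the structural change. */
        pthread_rwlock_wrlock(&nslock);
        for (unsigned i = 0; i < NR_NS; i++)
                if (namespaces[i].dead)
                        printf("removing namespace %d\n", namespaces[i].id);
        pthread_rwlock_unlock(&nslock);
}

int main(void)
{
        update_formats();       /* removes only namespace 2 */
        return 0;
}
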
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
 			0, &cmd.result, timeout);
 	nvme_passthru_end(ctrl, effects);
 
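
This one-token change is a genuine bug fix: the ioctl passthrough path forwarded cmd.metadata (the user-space buffer address) where the metadata length belonged, so any I/O command carrying metadata got a garbage length. It compiled silently because both fields are plain integers. A sketch of why, with the relevant fields paraphrased from the uapi header of this era (include/uapi/linux/nvme_ioctl.h; check your headers for the authoritative layout):

#include <stdint.h>
#include <stdio.h>

/* Paraphrased excerpt, not the complete struct nvme_passthru_cmd. */
struct passthru_cmd_excerpt {
        uint64_t metadata;      /* user address of the metadata buffer */
        uint64_t addr;          /* user address of the data buffer */
        uint32_t metadata_len;  /* metadata transfer length in bytes */
        uint32_t data_len;      /* data transfer length in bytes */
};

int main(void)
{
        struct passthru_cmd_excerpt cmd = {
                .metadata = 0x7f1234560000ull,  /* a pointer, not a length */
                .metadata_len = 4096,
        };
        /* The pre-fix code effectively computed the length like this,
         * truncating the pointer to 32 bits with no diagnostic: */
        uint32_t wrong = (uint32_t)cmd.metadata;
        printf("wrong len=%#x, right len=%u\n", wrong, cmd.metadata_len);
        return 0;
}
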
@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
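
With the extra clause, the reaper now collects a namespace for either of two independent reasons: the rescan reported a smaller valid NSID range (ns_id > nsid), or some path marked it dead. Callers that only want the latter, like nvme_update_formats() above, pass NVME_NSID_ALL, which is 0xffffffff in the NVMe headers, so the range test can never fire. A sketch of the predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_NSID_ALL 0xffffffffu       /* value from include/linux/nvme.h */

/* Mirror of the updated condition in nvme_remove_invalid_namespaces(). */
static bool should_reap(uint32_t ns_id, uint32_t nsid, bool dead)
{
        return ns_id > nsid || dead;
}

int main(void)
{
        /* Rescan says only NSIDs <= 4 exist: NSID 7 goes, live NSID 3 stays. */
        printf("%d %d\n", should_reap(7, 4, false), should_reap(3, 4, false));
        /* nvme_update_formats() path: only dead namespaces are collected. */
        printf("%d %d\n", should_reap(3, NVME_NSID_ALL, true),
                          should_reap(3, NVME_NSID_ALL, false));
        return 0;
}
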
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);