@@ -1095,16 +1095,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
 	return 0;
 }
 
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
 {
-	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+	struct TCP_Server_Info *server = tcon->ses->server;
+	struct sockaddr_storage ss;
 	const char *host;
+	const char *s2 = &tcon->tree_name[1];
 	size_t hostlen;
-	struct sockaddr_storage ss;
+	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
 	bool match;
 	int rc;
 
-	if (strcasecmp(s1, s2))
+	if (strcasecmp(s2, s1))
 		return false;
 
 	/*
@@ -1128,34 +1130,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
 	return match;
 }
 
-/*
- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
- * target shares in @refs.
- */
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
-					 const char *path,
-					 struct dfs_cache_tgt_list *old_tl,
-					 struct dfs_cache_tgt_list *new_tl)
-{
-	struct dfs_cache_tgt_iterator *oit, *nit;
-
-	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
-	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
-		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
-		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
-			if (target_share_equal(server,
-					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit))) {
-				dfs_cache_noreq_update_tgthint(path, nit);
-				return;
-			}
-		}
-	}
-
-	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-	cifs_signal_cifsd_for_reconnect(server, true);
-}
-
 static bool is_ses_good(struct cifs_ses *ses)
 {
 	struct TCP_Server_Info *server = ses->server;
@@ -1172,41 +1146,35 @@ static bool is_ses_good(struct cifs_ses *ses)
 	return ret;
 }
 
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+static char *get_ses_refpath(struct cifs_ses *ses)
 {
 	struct TCP_Server_Info *server = ses->server;
-	DFS_CACHE_TGT_LIST(old_tl);
-	DFS_CACHE_TGT_LIST(new_tl);
-	bool needs_refresh = false;
-	struct cache_entry *ce;
-	unsigned int xid;
-	char *path = NULL;
-	int rc = 0;
-
-	xid = get_xid();
+	char *path = ERR_PTR(-ENOENT);
 
 	mutex_lock(&server->refpath_lock);
 	if (server->leaf_fullpath) {
 		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
 		if (!path)
-			rc = -ENOMEM;
+			path = ERR_PTR(-ENOMEM);
 	}
 	mutex_unlock(&server->refpath_lock);
-	if (!path)
-		goto out;
+	return path;
+}
 
-	down_read(&htable_rw_lock);
-	ce = lookup_cache_entry(path);
-	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
-	if (!IS_ERR(ce)) {
-		rc = get_targets(ce, &old_tl);
-		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-	}
-	up_read(&htable_rw_lock);
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
+{
+	struct cache_entry *ce;
+	unsigned int xid;
+	char *path;
+	int rc = 0;
 
-	if (!needs_refresh) {
-		rc = 0;
+	xid = get_xid();
+
+	path = get_ses_refpath(ses);
+	if (IS_ERR(path)) {
+		rc = PTR_ERR(path);
+		path = NULL;
 		goto out;
 	}
 
@@ -1217,29 +1185,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 		goto out;
 	}
 
-	ce = cache_refresh_path(xid, ses, path, true);
-	if (!IS_ERR(ce)) {
-		rc = get_targets(ce, &new_tl);
+	ce = cache_refresh_path(xid, ses, path, false);
+	if (!IS_ERR(ce))
 		up_read(&htable_rw_lock);
-		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
-	}
+	else
+		rc = PTR_ERR(ce);
 
 out:
 	free_xid(xid);
-	dfs_cache_free_tgts(&old_tl);
-	dfs_cache_free_tgts(&new_tl);
 	kfree(path);
 }
 
-static inline void refresh_ses_referral(struct cifs_ses *ses)
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+				   const char *path,
+				   struct dfs_info3_param *refs,
+				   int numrefs, bool force_refresh)
 {
-	__refresh_ses_referral(ses, false);
+	struct cache_entry *ce;
+	bool reconnect = force_refresh;
+	int rc = 0;
+	int i;
+
+	if (unlikely(!numrefs))
+		return 0;
+
+	if (force_refresh) {
+		for (i = 0; i < numrefs; i++) {
+			/* TODO: include prefix paths in the matching */
+			if (target_share_equal(tcon, refs[i].node_name)) {
+				reconnect = false;
+				break;
+			}
+		}
+	}
+
+	down_write(&htable_rw_lock);
+	ce = lookup_cache_entry(path);
+	if (!IS_ERR(ce)) {
+		if (force_refresh || cache_entry_expired(ce))
+			rc = update_cache_entry_locked(ce, refs, numrefs);
+	} else if (PTR_ERR(ce) == -ENOENT) {
+		ce = add_cache_entry_locked(refs, numrefs);
+	}
+	up_write(&htable_rw_lock);
+
+	if (IS_ERR(ce))
+		rc = PTR_ERR(ce);
+	if (reconnect) {
+		cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+		cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+	}
+	return rc;
 }
 
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
 {
-	__refresh_ses_referral(ses, true);
+	struct dfs_info3_param *refs = NULL;
+	struct cache_entry *ce;
+	struct cifs_ses *ses;
+	unsigned int xid;
+	bool needs_refresh;
+	char *path;
+	int numrefs = 0;
+	int rc = 0;
+
+	xid = get_xid();
+	ses = tcon->ses;
+
+	path = get_ses_refpath(ses);
+	if (IS_ERR(path)) {
+		rc = PTR_ERR(path);
+		path = NULL;
+		goto out;
+	}
+
+	down_read(&htable_rw_lock);
+	ce = lookup_cache_entry(path);
+	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+	if (!needs_refresh) {
+		up_read(&htable_rw_lock);
+		goto out;
+	}
+	up_read(&htable_rw_lock);
+
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
+		goto out;
+	}
+
+	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+	if (!rc) {
+		rc = __refresh_tcon_referral(tcon, path, refs,
+					     numrefs, force_refresh);
+	}
+
+out:
+	free_xid(xid);
+	kfree(path);
+	free_dfs_info_array(refs, numrefs);
 }
 
 /**
@@ -1280,7 +1325,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	force_refresh_ses_referral(tcon->ses);
+	refresh_tcon_referral(tcon, true);
 	return 0;
 }
 
@@ -1291,9 +1336,11 @@ void dfs_cache_refresh(struct work_struct *work)
 	struct cifs_ses *ses;
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+	ses = tcon->ses->dfs_root_ses;
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+	for (; ses; ses = ses->dfs_root_ses)
 		refresh_ses_referral(ses);
+	refresh_tcon_referral(tcon, false);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
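
A note on the error-return convention the new get_ses_refpath() adopts: rather than returning NULL and reporting the reason through a separate rc, it encodes the errno in the returned pointer, so both refresh_ses_referral() and refresh_tcon_referral() can distinguish "no leaf_fullpath" (-ENOENT) from an allocation failure (-ENOMEM) with a single IS_ERR()/PTR_ERR() check. Below is a minimal userspace sketch of that kernel ERR_PTR idiom, not part of the patch: the macro reimplementations and the get_refpath() stand-in are simplified illustrations only (the real definitions live in <linux/err.h>).

/*
 * Userspace illustration of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom used by
 * get_ses_refpath() above: a function returns either a valid heap string or
 * an errno encoded in the pointer itself, so callers need only one return
 * value. These helpers are simplified stand-ins for the kernel macros.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* Errnos occupy the top MAX_ERRNO addresses, never valid pointers. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for get_ses_refpath(); leaf may be NULL. */
static char *get_refpath(const char *leaf)
{
	char *path = ERR_PTR(-ENOENT);

	if (leaf) {
		path = strdup(leaf + 1);	/* skip leading '\\' */
		if (!path)
			path = ERR_PTR(-ENOMEM);
	}
	return path;
}

int main(void)
{
	char *path = get_refpath("\\\\srv\\share");

	if (IS_ERR(path)) {
		/* Same check pattern as refresh_{ses,tcon}_referral(). */
		fprintf(stderr, "no refpath: %ld\n", PTR_ERR(path));
		return 1;
	}
	printf("refpath: %s\n", path);
	free(path);
	return 0;
}

This single-return-value shape is what lets the patch drop the old path/rc bookkeeping of __refresh_ses_referral() and share the refpath lookup between the ses and tcon refresh paths.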