@@ -1172,19 +1172,30 @@ static bool is_ses_good(struct cifs_ses *ses)
 	return ret;
 }
 
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 {
 	struct TCP_Server_Info *server = ses->server;
 	DFS_CACHE_TGT_LIST(old_tl);
 	DFS_CACHE_TGT_LIST(new_tl);
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
+	char *path = NULL;
 	int rc = 0;
 
 	xid = get_xid();
 
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath) {
+		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+		if (!path)
+			rc = -ENOMEM;
+	}
+	mutex_unlock(&server->refpath_lock);
+	if (!path)
+		goto out;
+
 	down_read(&htable_rw_lock);
 	ce = lookup_cache_entry(path);
 	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
 	free_xid(xid);
 	dfs_cache_free_tgts(&old_tl);
 	dfs_cache_free_tgts(&new_tl);
-	return rc;
+	kfree(path);
 }
 
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
 {
-	struct TCP_Server_Info *server = tcon->ses->server;
-	struct cifs_ses *ses = tcon->ses;
+	__refresh_ses_referral(ses, false);
+}
 
-	mutex_lock(&server->refpath_lock);
-	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
-	mutex_unlock(&server->refpath_lock);
-	return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+	__refresh_ses_referral(ses, true);
 }
 
 /**
@@ -1271,25 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	return refresh_tcon(tcon, true);
+	force_refresh_ses_referral(tcon->ses);
+	return 0;
 }
 
 /* Refresh all DFS referrals related to DFS tcon */
 void dfs_cache_refresh(struct work_struct *work)
 {
-	struct TCP_Server_Info *server;
 	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses) {
-		server = ses->server;
-		mutex_lock(&server->refpath_lock);
-		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
-		mutex_unlock(&server->refpath_lock);
-	}
+	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+		refresh_ses_referral(ses);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
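
The key pattern this patch centralizes in __refresh_ses_referral() is: hold the lock only long enough to duplicate the protected string, then do the slow work (cache lookup, referral refresh) on the private copy, freeing it at the end. Below is a minimal standalone userspace sketch of that pattern, not kernel code; all names (shared_path, refresh_referral) are illustrative and do not exist in the source.

/*
 * Userspace sketch of the snapshot-under-lock pattern adopted by the
 * patch. Assumption: the shared string may be replaced or freed by
 * other threads, so callers must work on a private copy.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t path_lock = PTHREAD_MUTEX_INITIALIZER;
static char *shared_path;	/* protected by path_lock */

static void refresh_referral(void)
{
	char *path = NULL;

	/* Hold the lock only long enough to duplicate the string. */
	pthread_mutex_lock(&path_lock);
	if (shared_path)
		path = strdup(shared_path + 1);	/* skip leading separator */
	pthread_mutex_unlock(&path_lock);
	if (!path)
		return;

	/* Slow work runs without the lock held. */
	printf("refreshing referral for %s\n", path);

	free(path);
}

int main(void)
{
	shared_path = strdup("\\server\\share");
	refresh_referral();
	free(shared_path);
	return 0;
}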