 #include "cifs_unicode.h"
 #include "smb2glob.h"
 #include "dns_resolve.h"
+#include "dfs.h"
 
 #include "dfs_cache.h"
 
-#define CACHE_HTABLE_SIZE 32
-#define CACHE_MAX_ENTRIES 64
-#define CACHE_MIN_TTL 120 /* 2 minutes */
+#define CACHE_HTABLE_SIZE	32
+#define CACHE_MAX_ENTRIES	64
+#define CACHE_MIN_TTL		120 /* 2 minutes */
+#define CACHE_DEFAULT_TTL	300 /* 5 minutes */
 
 #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
 
@@ -50,10 +52,9 @@ struct cache_entry {
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
-static struct workqueue_struct *dfscache_wq __read_mostly;
+struct workqueue_struct *dfscache_wq;
 
-static int cache_ttl;
-static DEFINE_SPINLOCK(cache_ttl_lock);
+atomic_t dfs_cache_ttl;
 
 static struct nls_table *cache_cp;
 
@@ -65,10 +66,6 @@ static atomic_t cache_count;
 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DECLARE_RWSEM(htable_rw_lock);
 
-static void refresh_cache_worker(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-
 /**
  * dfs_cache_canonical_path - get a canonical DFS path
  *
@@ -290,7 +287,9 @@ int dfs_cache_init(void)
 	int rc;
 	int i;
 
-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+	dfscache_wq = alloc_workqueue("cifs-dfscache",
+				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
+				      0);
 	if (!dfscache_wq)
 		return -ENOMEM;
 
@@ -306,6 +305,7 @@ int dfs_cache_init(void)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
 	atomic_set(&cache_count, 0);
+	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
 	cache_cp = load_nls("utf8");
 	if (!cache_cp)
 		cache_cp = load_nls_default();
@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	int rc;
 	struct cache_entry *ce;
 	unsigned int hash;
+	int ttl;
 
 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	if (IS_ERR(ce))
 		return ce;
 
-	spin_lock(&cache_ttl_lock);
-	if (!cache_ttl) {
-		cache_ttl = ce->ttl;
-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	} else {
-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	}
-	spin_unlock(&cache_ttl_lock);
+	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
+	atomic_set(&dfs_cache_ttl, ttl);
 
 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
 	dump_ce(ce);
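
For context, a minimal userspace sketch of the TTL bookkeeping introduced here, assuming only what the hunks show: the shared value starts at CACHE_DEFAULT_TTL, is clamped down to the smallest referral TTL seen, and a later hunk reads it back as the refresh interval. This is a model using C11 atomics, not the kernel code; in the kernel the read/min/set above runs under htable_rw_lock.

    #include <stdatomic.h>
    #include <stdio.h>

    #define CACHE_DEFAULT_TTL 300   /* 5 minutes, as in the patch */

    /* Userspace stand-in for the kernel's atomic_t dfs_cache_ttl. */
    static atomic_int dfs_cache_ttl = CACHE_DEFAULT_TTL;

    /* Mirror of the read/min/set done in add_cache_entry_locked(). */
    static void note_referral_ttl(int referral_ttl)
    {
        int ttl = atomic_load(&dfs_cache_ttl);

        if (referral_ttl < ttl)
            ttl = referral_ttl;
        atomic_store(&dfs_cache_ttl, ttl);
    }

    int main(void)
    {
        note_referral_ttl(600);  /* larger than current value: no change */
        note_referral_ttl(180);  /* smaller: shared TTL drops to 180 */
        printf("refresh interval: %d s\n", atomic_load(&dfs_cache_ttl));
        return 0;
    }
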
@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
  */
 void dfs_cache_destroy(void)
 {
-	cancel_delayed_work_sync(&refresh_task);
 	unload_nls(cache_cp);
 	flush_cache_ents();
 	kmem_cache_destroy(cache_slab);
@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * target shares in @refs.
  */
 static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+					 const char *path,
 					 struct dfs_cache_tgt_list *old_tl,
 					 struct dfs_cache_tgt_list *new_tl)
 {
@@ -1153,22 +1147,39 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
 			if (target_share_equal(server,
 					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit)))
+					       dfs_cache_get_tgt_name(nit))) {
+				dfs_cache_noreq_update_tgthint(path, nit);
 				return;
+			}
 		}
 	}
 
 	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
 	cifs_signal_cifsd_for_reconnect(server, true);
 }
 
+static bool is_ses_good(struct cifs_ses *ses)
+{
+	struct TCP_Server_Info *server = ses->server;
+	struct cifs_tcon *tcon = ses->tcon_ipc;
+	bool ret;
+
+	spin_lock(&ses->ses_lock);
+	spin_lock(&ses->chan_lock);
+	ret = !cifs_chan_needs_reconnect(ses, server) &&
+		ses->ses_status == SES_GOOD &&
+		!tcon->need_reconnect;
+	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);
+	return ret;
+}
+
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
 {
 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
-	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
-	struct cifs_tcon *ipc = ses->tcon_ipc;
+	struct TCP_Server_Info *server = ses->server;
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
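
A rough, simplified model (userspace C, made-up target names) of the decision changed in mark_for_reconnect_if_needed() above: if any cached target still appears in the fresh referral, that target simply becomes the new hint, mirroring the added dfs_cache_noreq_update_tgthint() call; only when nothing matches is the share marked for reconnect.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for the cached (old) and fresh (new) target lists. */
    static const char *old_targets[] = { "\\\\srvA\\share", "\\\\srvB\\share" };
    static const char *new_targets[] = { "\\\\srvB\\share", "\\\\srvC\\share" };

    /* Return true and set *hint if a cached target is still referenced by the
     * new referral; this models the "update tgthint and return" branch. */
    static bool pick_surviving_target(const char **hint)
    {
        for (size_t i = 0; i < sizeof(old_targets) / sizeof(*old_targets); i++) {
            for (size_t j = 0; j < sizeof(new_targets) / sizeof(*new_targets); j++) {
                if (strcmp(old_targets[i], new_targets[j]) == 0) {
                    *hint = new_targets[j];
                    return true;
                }
            }
        }
        return false;
    }

    int main(void)
    {
        const char *hint = NULL;

        if (pick_surviving_target(&hint))
            printf("keep connection, new target hint: %s\n", hint);
        else
            printf("no matching target: mark share for reconnect\n");
        return 0;
    }
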
@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 		goto out;
 	}
 
-	spin_lock(&ipc->tc_lock);
-	if (ipc->status != TID_GOOD) {
-		spin_unlock(&ipc->tc_lock);
-		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
 		goto out;
 	}
-	spin_unlock(&ipc->tc_lock);
 
 	ce = cache_refresh_path(xid, ses, path, true);
 	if (!IS_ERR(ce)) {
 		rc = get_targets(ce, &new_tl);
 		up_read(&htable_rw_lock);
 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
+		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
 	}
 
 out:
@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
 {
 	struct TCP_Server_Info *server = tcon->ses->server;
+	struct cifs_ses *ses = tcon->ses;
 
 	mutex_lock(&server->refpath_lock);
 	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
+		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
 	mutex_unlock(&server->refpath_lock);
 	return 0;
 }
@@ -1263,60 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	return refresh_tcon(tcon, true);
 }
 
-/*
- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
- * from a DFS referral.
- */
-static void refresh_cache_worker(struct work_struct *work)
+/* Refresh all DFS referrals related to DFS tcon */
+void dfs_cache_refresh(struct work_struct *work)
 {
 	struct TCP_Server_Info *server;
-	struct cifs_tcon *tcon, *ntcon;
-	struct list_head tcons;
+	struct dfs_root_ses *rses;
+	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
-	INIT_LIST_HEAD(&tcons);
+	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+	ses = tcon->ses;
+	server = ses->server;
 
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-		spin_lock(&server->srv_lock);
-		if (!server->leaf_fullpath) {
-			spin_unlock(&server->srv_lock);
-			continue;
-		}
-		spin_unlock(&server->srv_lock);
-
-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-			if (ses->tcon_ipc) {
-				ses->ses_count++;
-				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
-			}
-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
-				if (!tcon->ipc) {
-					tcon->tc_count++;
-					list_add_tail(&tcon->ulist, &tcons);
-				}
-			}
-		}
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-		struct TCP_Server_Info *server = tcon->ses->server;
-
-		list_del_init(&tcon->ulist);
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath)
+		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
+	mutex_unlock(&server->refpath_lock);
 
+	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+		ses = rses->ses;
+		server = ses->server;
 		mutex_lock(&server->refpath_lock);
 		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
+			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
 		mutex_unlock(&server->refpath_lock);
-
-		if (tcon->ipc)
-			cifs_put_smb_ses(tcon->ses);
-		else
-			cifs_put_tcon(tcon);
 	}
 
-	spin_lock(&cache_ttl_lock);
-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	spin_unlock(&cache_ttl_lock);
+	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+			   atomic_read(&dfs_cache_ttl) * HZ);
 }
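
The final hunk replaces the single global refresh_task with a delayed work embedded in each DFS tcon: dfs_cache_refresh() recovers its tcon via container_of() and re-arms itself using the shared TTL. The work item is presumably initialized with this function elsewhere in the patch (not shown in these hunks). Below is a small self-contained sketch of that embed-and-recover pattern, with dummy types standing in for the CIFS and workqueue ones:

    #include <stddef.h>
    #include <stdio.h>

    /* Dummy stand-ins for struct work_struct / struct delayed_work. */
    struct work_struct { int pending; };
    struct delayed_work { struct work_struct work; };

    /* Dummy tcon with the delayed work embedded in it, like dfs_cache_work. */
    struct tcon {
        const char *share;
        struct delayed_work dfs_cache_work;
    };

    /* container_of(), simplified from the kernel macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The worker only receives the embedded work_struct and recovers the
     * owning tcon from it, mirroring what dfs_cache_refresh() now does. */
    static void refresh_worker(struct work_struct *work)
    {
        struct tcon *tcon = container_of(work, struct tcon, dfs_cache_work.work);

        printf("refreshing DFS referrals for %s\n", tcon->share);
    }

    int main(void)
    {
        struct tcon tcon = { .share = "\\\\srv\\dfsroot" };

        /* A real workqueue would invoke the callback later; call it directly. */
        refresh_worker(&tcon.dfs_cache_work.work);
        return 0;
    }
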