
Commit 6be2ea3

Paulo Alcantara authored and Steve French committed
cifs: avoid potential races when handling multiple dfs tcons
Now that a DFS tcon manages its own list of DFS referrals and sessions,
there is no point in having a single worker to refresh referrals of all
DFS tcons. Make it faster and less prone to race conditions when having
several mounts by queueing a worker per DFS tcon that will take care of
refreshing only the DFS referrals related to it.

Cc: [email protected] # v6.2+
Signed-off-by: Paulo Alcantara (SUSE) <[email protected]>
Signed-off-by: Steve French <[email protected]>
1 parent 3dc9c43 commit 6be2ea3
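
The change boils down to a self-rearming delayed work item owned by each DFS tcon instead of one global refresh worker. A minimal sketch of that pattern follows, with purely illustrative names (my_tcon, my_wq, my_tcon_refresh are not cifs identifiers); it only mirrors the INIT_DELAYED_WORK / queue_delayed_work / cancel_delayed_work_sync lifecycle that the hunks below apply to struct cifs_tcon:

#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/slab.h>

/* Illustrative stand-in for a DFS tcon: every instance owns its own
 * delayed work item, so refreshing one mount never touches another.
 */
struct my_tcon {
	struct delayed_work refresh_work;
	atomic_t ttl;		/* refresh period in seconds */
};

/* Assumed to be set up elsewhere with alloc_workqueue(). */
static struct workqueue_struct *my_wq;

/* Runs on my_wq: refresh only this tcon's referrals, then re-arm. */
static void my_tcon_refresh(struct work_struct *work)
{
	struct my_tcon *tcon = container_of(work, struct my_tcon,
					    refresh_work.work);

	/* ... refresh the DFS referrals that belong to @tcon ... */

	queue_delayed_work(my_wq, &tcon->refresh_work,
			   atomic_read(&tcon->ttl) * HZ);
}

static struct my_tcon *my_tcon_alloc(void)
{
	struct my_tcon *tcon = kzalloc(sizeof(*tcon), GFP_KERNEL);

	if (!tcon)
		return NULL;
	atomic_set(&tcon->ttl, 300);
	INIT_DELAYED_WORK(&tcon->refresh_work, my_tcon_refresh);
	queue_delayed_work(my_wq, &tcon->refresh_work,
			   atomic_read(&tcon->ttl) * HZ);
	return tcon;
}

static void my_tcon_free(struct my_tcon *tcon)
{
	/* Stops a pending/running refresh and its self-requeue before free. */
	cancel_delayed_work_sync(&tcon->refresh_work);
	kfree(tcon);
}

The patch applies exactly this shape: the work item moves into per-tcon state (cifsglob.h), cifs_get_tcon()/cifs_put_tcon() initialize and cancel it (connect.c), the DFS mount path queues the first run (dfs.c), and dfs_cache_refresh() re-arms itself using the cached TTL (dfs_cache.c).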

File tree

fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dfs.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h

5 files changed (+80, -79 lines changed)

fs/cifs/cifsglob.h

Lines changed: 1 addition & 1 deletion
@@ -1238,8 +1238,8 @@ struct cifs_tcon {
 	struct cached_fids *cfids;
 	/* BB add field for back pointer to sb struct(s)? */
 #ifdef CONFIG_CIFS_DFS_UPCALL
-	struct list_head ulist; /* cache update list */
 	struct list_head dfs_ses_list;
+	struct delayed_work dfs_cache_work;
 #endif
 	struct delayed_work query_interfaces; /* query interfaces workqueue job */
 };

fs/cifs/connect.c

Lines changed: 6 additions & 1 deletion
@@ -2340,6 +2340,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
 
 	/* cancel polling of interfaces */
 	cancel_delayed_work_sync(&tcon->query_interfaces);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	cancel_delayed_work_sync(&tcon->dfs_cache_work);
+#endif
 
 	if (tcon->use_witness) {
 		int rc;
@@ -2587,7 +2590,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
 	}
-
+#ifdef CONFIG_CIFS_DFS_UPCALL
+	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
+#endif
 	spin_lock(&cifs_tcp_ses_lock);
 	list_add(&tcon->tcon_list, &ses->tcon_list);
 	spin_unlock(&cifs_tcp_ses_lock);

fs/cifs/dfs.c

Lines changed: 4 additions & 0 deletions
@@ -157,6 +157,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
 		rc = cifs_is_path_remote(mnt_ctx);
 	}
 
+	dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
+
 	if (rc == -EREMOTE && is_refsrv) {
 		rc2 = get_root_smb_session(mnt_ctx);
 		if (rc2)
@@ -259,6 +261,8 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
 		if (list_empty(&tcon->dfs_ses_list)) {
 			list_replace_init(&mnt_ctx->dfs_ses_list,
 					  &tcon->dfs_ses_list);
+			queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+					   dfs_cache_get_ttl() * HZ);
 		} else {
 			dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
 		}

fs/cifs/dfs_cache.c

Lines changed: 60 additions & 77 deletions
@@ -20,12 +20,14 @@
 #include "cifs_unicode.h"
 #include "smb2glob.h"
 #include "dns_resolve.h"
+#include "dfs.h"
 
 #include "dfs_cache.h"
 
-#define CACHE_HTABLE_SIZE 32
-#define CACHE_MAX_ENTRIES 64
-#define CACHE_MIN_TTL 120 /* 2 minutes */
+#define CACHE_HTABLE_SIZE	32
+#define CACHE_MAX_ENTRIES	64
+#define CACHE_MIN_TTL		120 /* 2 minutes */
+#define CACHE_DEFAULT_TTL	300 /* 5 minutes */
 
 #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
 
@@ -50,10 +52,9 @@ struct cache_entry {
 };
 
 static struct kmem_cache *cache_slab __read_mostly;
-static struct workqueue_struct *dfscache_wq __read_mostly;
+struct workqueue_struct *dfscache_wq;
 
-static int cache_ttl;
-static DEFINE_SPINLOCK(cache_ttl_lock);
+atomic_t dfs_cache_ttl;
 
 static struct nls_table *cache_cp;
 
@@ -65,10 +66,6 @@ static atomic_t cache_count;
 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
 static DECLARE_RWSEM(htable_rw_lock);
 
-static void refresh_cache_worker(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
-
 /**
  * dfs_cache_canonical_path - get a canonical DFS path
  *
@@ -290,7 +287,9 @@ int dfs_cache_init(void)
 	int rc;
 	int i;
 
-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+	dfscache_wq = alloc_workqueue("cifs-dfscache",
+				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
+				      0);
 	if (!dfscache_wq)
 		return -ENOMEM;
 
@@ -306,6 +305,7 @@ int dfs_cache_init(void)
 		INIT_HLIST_HEAD(&cache_htable[i]);
 
 	atomic_set(&cache_count, 0);
+	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
 	cache_cp = load_nls("utf8");
 	if (!cache_cp)
 		cache_cp = load_nls_default();
@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	int rc;
 	struct cache_entry *ce;
 	unsigned int hash;
+	int ttl;
 
 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 	if (IS_ERR(ce))
 		return ce;
 
-	spin_lock(&cache_ttl_lock);
-	if (!cache_ttl) {
-		cache_ttl = ce->ttl;
-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	} else {
-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	}
-	spin_unlock(&cache_ttl_lock);
+	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
+	atomic_set(&dfs_cache_ttl, ttl);
 
 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
 	dump_ce(ce);
@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
  */
 void dfs_cache_destroy(void)
 {
-	cancel_delayed_work_sync(&refresh_task);
 	unload_nls(cache_cp);
 	flush_cache_ents();
 	kmem_cache_destroy(cache_slab);
@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * target shares in @refs.
  */
 static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+					 const char *path,
 					 struct dfs_cache_tgt_list *old_tl,
 					 struct dfs_cache_tgt_list *new_tl)
 {
@@ -1153,22 +1147,39 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
 			if (target_share_equal(server,
 					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit)))
+					       dfs_cache_get_tgt_name(nit))) {
+				dfs_cache_noreq_update_tgthint(path, nit);
 				return;
+			}
 		}
 	}
 
 	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
 	cifs_signal_cifsd_for_reconnect(server, true);
 }
 
+static bool is_ses_good(struct cifs_ses *ses)
+{
+	struct TCP_Server_Info *server = ses->server;
+	struct cifs_tcon *tcon = ses->tcon_ipc;
+	bool ret;
+
+	spin_lock(&ses->ses_lock);
+	spin_lock(&ses->chan_lock);
+	ret = !cifs_chan_needs_reconnect(ses, server) &&
+		ses->ses_status == SES_GOOD &&
+		!tcon->need_reconnect;
+	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);
+	return ret;
+}
+
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
 {
 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
-	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
-	struct cifs_tcon *ipc = ses->tcon_ipc;
+	struct TCP_Server_Info *server = ses->server;
 	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 		goto out;
 	}
 
-	spin_lock(&ipc->tc_lock);
-	if (ipc->status != TID_GOOD) {
-		spin_unlock(&ipc->tc_lock);
-		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
 		goto out;
 	}
-	spin_unlock(&ipc->tc_lock);
 
 	ce = cache_refresh_path(xid, ses, path, true);
 	if (!IS_ERR(ce)) {
 		rc = get_targets(ce, &new_tl);
 		up_read(&htable_rw_lock);
 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
+		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
 	}
 
 out:
@@ -1216,10 +1226,11 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
 static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
 {
 	struct TCP_Server_Info *server = tcon->ses->server;
+	struct cifs_ses *ses = tcon->ses;
 
 	mutex_lock(&server->refpath_lock);
 	if (server->leaf_fullpath)
-		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
+		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
 	mutex_unlock(&server->refpath_lock);
 	return 0;
 }
@@ -1263,60 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	return refresh_tcon(tcon, true);
 }
 
-/*
- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
- * from a DFS referral.
- */
-static void refresh_cache_worker(struct work_struct *work)
+/* Refresh all DFS referrals related to DFS tcon */
+void dfs_cache_refresh(struct work_struct *work)
 {
 	struct TCP_Server_Info *server;
-	struct cifs_tcon *tcon, *ntcon;
-	struct list_head tcons;
+	struct dfs_root_ses *rses;
+	struct cifs_tcon *tcon;
 	struct cifs_ses *ses;
 
-	INIT_LIST_HEAD(&tcons);
+	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
+	ses = tcon->ses;
+	server = ses->server;
 
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-		spin_lock(&server->srv_lock);
-		if (!server->leaf_fullpath) {
-			spin_unlock(&server->srv_lock);
-			continue;
-		}
-		spin_unlock(&server->srv_lock);
-
-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-			if (ses->tcon_ipc) {
-				ses->ses_count++;
-				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
-			}
-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
-				if (!tcon->ipc) {
-					tcon->tc_count++;
-					list_add_tail(&tcon->ulist, &tcons);
-				}
-			}
-		}
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
-		struct TCP_Server_Info *server = tcon->ses->server;
-
-		list_del_init(&tcon->ulist);
+	mutex_lock(&server->refpath_lock);
+	if (server->leaf_fullpath)
+		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
+	mutex_unlock(&server->refpath_lock);
 
+	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
+		ses = rses->ses;
+		server = ses->server;
 		mutex_lock(&server->refpath_lock);
 		if (server->leaf_fullpath)
-			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
+			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
 		mutex_unlock(&server->refpath_lock);
-
-		if (tcon->ipc)
-			cifs_put_smb_ses(tcon->ses);
-		else
-			cifs_put_tcon(tcon);
 	}
 
-	spin_lock(&cache_ttl_lock);
-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
-	spin_unlock(&cache_ttl_lock);
+	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);
 }

fs/cifs/dfs_cache.h

Lines changed: 9 additions & 0 deletions
@@ -13,6 +13,9 @@
 #include <linux/uuid.h>
 #include "cifsglob.h"
 
+extern struct workqueue_struct *dfscache_wq;
+extern atomic_t dfs_cache_ttl;
+
 #define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
 
 struct dfs_cache_tgt_list {
@@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
			    char **prefix);
 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
+void dfs_cache_refresh(struct work_struct *work);
 
 static inline struct dfs_cache_tgt_iterator *
 dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
@@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
 	return tl ? tl->tl_numtgts : 0;
 }
 
+static inline int dfs_cache_get_ttl(void)
+{
+	return atomic_read(&dfs_cache_ttl);
+}
+
 #endif /* _CIFS_DFS_CACHE_H */
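
Taken together with the dfs_cache.c hunks, the header now exposes just enough for dfs.c to arm the first per-tcon refresh: the shared workqueue, the atomic TTL, and a read-only accessor. The sketch below is a hedged illustration of how the TTL floor behaves (ttl_clamp_example and the sample values are illustrative only; the real clamping lives in add_cache_entry_locked() above):

#include <linux/atomic.h>
#include <linux/minmax.h>

static atomic_t dfs_cache_ttl = ATOMIC_INIT(300);	/* CACHE_DEFAULT_TTL */

/* Mirrors the clamp in add_cache_entry_locked(): the shared refresh
 * period only ever shrinks toward the smallest referral TTL seen so far.
 */
static void ttl_clamp_example(int referral_ttl)
{
	int ttl = min_t(int, atomic_read(&dfs_cache_ttl), referral_ttl);

	atomic_set(&dfs_cache_ttl, ttl);
}

/* ttl_clamp_example(900) leaves the period at 300s; ttl_clamp_example(120)
 * lowers it to 120s, so the next queue_delayed_work(dfscache_wq,
 * &tcon->dfs_cache_work, dfs_cache_get_ttl() * HZ) re-arms the per-tcon
 * worker after 2 minutes instead of 5.
 */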
