Skip to content

Commit 2ed3bf1

Browse files
Florian Westphal authored and ummakynes committed
netfilter: ecache: use dedicated list for event redelivery
This disentangles event redelivery and the percpu dying list. Because entries are now stored on a dedicated list, all entries are in NFCT_ECACHE_DESTROY_FAIL state and all entries still have confirmed bit set -- the reference count is at least 1. The 'struct net' back-pointer can be removed as well. The pcpu dying list will be removed eventually, it has no functionality. Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
1 parent a997157 commit 2ed3bf1

File tree

4 files changed

+82
-73
lines changed

4 files changed

+82
-73
lines changed

include/net/netfilter/nf_conntrack.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,8 @@ union nf_conntrack_expect_proto {
4545

4646
struct nf_conntrack_net_ecache {
4747
struct delayed_work dwork;
48-
struct netns_ct *ct_net;
48+
spinlock_t dying_lock;
49+
struct hlist_nulls_head dying_list;
4950
};
5051

5152
struct nf_conntrack_net {

include/net/netfilter/nf_conntrack_ecache.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
#include <net/netfilter/nf_conntrack_extend.h>
1515

1616
enum nf_ct_ecache_state {
17-
NFCT_ECACHE_UNKNOWN, /* destroy event not sent */
1817
NFCT_ECACHE_DESTROY_FAIL, /* tried but failed to send destroy event */
1918
NFCT_ECACHE_DESTROY_SENT, /* sent destroy event after failure */
2019
};
@@ -23,7 +22,6 @@ struct nf_conntrack_ecache {
2322
unsigned long cache; /* bitops want long */
2423
u16 ctmask; /* bitmask of ct events to be delivered */
2524
u16 expmask; /* bitmask of expect events to be delivered */
26-
enum nf_ct_ecache_state state:8;/* ecache state */
2725
u32 missed; /* missed events */
2826
u32 portid; /* netlink portid of destroyer */
2927
};

net/netfilter/nf_conntrack_core.c

Lines changed: 28 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -660,15 +660,12 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
660660
}
661661
EXPORT_SYMBOL(nf_ct_destroy);
662662

663-
static void nf_ct_delete_from_lists(struct nf_conn *ct)
663+
static void __nf_ct_delete_from_lists(struct nf_conn *ct)
664664
{
665665
struct net *net = nf_ct_net(ct);
666666
unsigned int hash, reply_hash;
667667
unsigned int sequence;
668668

669-
nf_ct_helper_destroy(ct);
670-
671-
local_bh_disable();
672669
do {
673670
sequence = read_seqcount_begin(&nf_conntrack_generation);
674671
hash = hash_conntrack(net,
@@ -681,12 +678,33 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
681678

682679
clean_from_lists(ct);
683680
nf_conntrack_double_unlock(hash, reply_hash);
681+
}
684682

683+
static void nf_ct_delete_from_lists(struct nf_conn *ct)
684+
{
685+
nf_ct_helper_destroy(ct);
686+
local_bh_disable();
687+
688+
__nf_ct_delete_from_lists(ct);
685689
nf_ct_add_to_dying_list(ct);
686690

687691
local_bh_enable();
688692
}
689693

694+
static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
695+
{
696+
#ifdef CONFIG_NF_CONNTRACK_EVENTS
697+
struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct));
698+
699+
spin_lock(&cnet->ecache.dying_lock);
700+
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
701+
&cnet->ecache.dying_list);
702+
spin_unlock(&cnet->ecache.dying_lock);
703+
#else
704+
nf_ct_add_to_dying_list(ct);
705+
#endif
706+
}
707+
690708
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
691709
{
692710
struct nf_conn_tstamp *tstamp;
@@ -709,7 +727,12 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
709727
/* destroy event was not delivered. nf_ct_put will
710728
* be done by event cache worker on redelivery.
711729
*/
712-
nf_ct_delete_from_lists(ct);
730+
nf_ct_helper_destroy(ct);
731+
local_bh_disable();
732+
__nf_ct_delete_from_lists(ct);
733+
nf_ct_add_to_ecache_list(ct);
734+
local_bh_enable();
735+
713736
nf_conntrack_ecache_work(nf_ct_net(ct), NFCT_ECACHE_DESTROY_FAIL);
714737
return false;
715738
}

net/netfilter/nf_conntrack_ecache.c

Lines changed: 52 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
#include <linux/vmalloc.h>
1717
#include <linux/stddef.h>
1818
#include <linux/err.h>
19-
#include <linux/percpu.h>
2019
#include <linux/kernel.h>
2120
#include <linux/netdevice.h>
2221
#include <linux/slab.h>
@@ -29,103 +28,89 @@
2928

3029
static DEFINE_MUTEX(nf_ct_ecache_mutex);
3130

32-
#define ECACHE_RETRY_WAIT (HZ/10)
33-
#define ECACHE_STACK_ALLOC (256 / sizeof(void *))
31+
#define DYING_NULLS_VAL ((1 << 30) + 1)
32+
#define ECACHE_MAX_JIFFIES msecs_to_jiffies(10)
33+
#define ECACHE_RETRY_JIFFIES msecs_to_jiffies(10)
3434

3535
enum retry_state {
3636
STATE_CONGESTED,
3737
STATE_RESTART,
3838
STATE_DONE,
3939
};
4040

41-
static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
41+
static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
4242
{
43-
struct nf_conn *refs[ECACHE_STACK_ALLOC];
43+
unsigned long stop = jiffies + ECACHE_MAX_JIFFIES;
44+
struct hlist_nulls_head evicted_list;
4445
enum retry_state ret = STATE_DONE;
4546
struct nf_conntrack_tuple_hash *h;
4647
struct hlist_nulls_node *n;
47-
unsigned int evicted = 0;
48+
unsigned int sent;
4849

49-
spin_lock(&pcpu->lock);
50+
INIT_HLIST_NULLS_HEAD(&evicted_list, DYING_NULLS_VAL);
5051

51-
hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
52+
next:
53+
sent = 0;
54+
spin_lock_bh(&cnet->ecache.dying_lock);
55+
56+
hlist_nulls_for_each_entry_safe(h, n, &cnet->ecache.dying_list, hnnode) {
5257
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
53-
struct nf_conntrack_ecache *e;
54-
55-
if (!nf_ct_is_confirmed(ct))
56-
continue;
57-
58-
/* This ecache access is safe because the ct is on the
59-
* pcpu dying list and we hold the spinlock -- the entry
60-
* cannot be free'd until after the lock is released.
61-
*
62-
* This is true even if ct has a refcount of 0: the
63-
* cpu that is about to free the entry must remove it
64-
* from the dying list and needs the lock to do so.
65-
*/
66-
e = nf_ct_ecache_find(ct);
67-
if (!e || e->state != NFCT_ECACHE_DESTROY_FAIL)
68-
continue;
6958

70-
/* ct is in NFCT_ECACHE_DESTROY_FAIL state, this means
71-
* the worker owns this entry: the ct will remain valid
72-
* until the worker puts its ct reference.
59+
/* The worker owns all entries, ct remains valid until nf_ct_put
60+
* in the loop below.
7361
*/
7462
if (nf_conntrack_event(IPCT_DESTROY, ct)) {
7563
ret = STATE_CONGESTED;
7664
break;
7765
}
7866

79-
e->state = NFCT_ECACHE_DESTROY_SENT;
80-
refs[evicted] = ct;
67+
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
68+
hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode, &evicted_list);
8169

82-
if (++evicted >= ARRAY_SIZE(refs)) {
70+
if (time_after(stop, jiffies)) {
8371
ret = STATE_RESTART;
8472
break;
8573
}
74+
75+
if (sent++ > 16) {
76+
spin_unlock_bh(&cnet->ecache.dying_lock);
77+
cond_resched();
78+
goto next;
79+
}
8680
}
8781

88-
spin_unlock(&pcpu->lock);
82+
spin_unlock_bh(&cnet->ecache.dying_lock);
8983

90-
/* can't _put while holding lock */
91-
while (evicted)
92-
nf_ct_put(refs[--evicted]);
84+
hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
85+
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
86+
87+
hlist_nulls_add_fake(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
88+
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
89+
nf_ct_put(ct);
90+
91+
cond_resched();
92+
}
9393

9494
return ret;
9595
}
9696

9797
static void ecache_work(struct work_struct *work)
9898
{
9999
struct nf_conntrack_net *cnet = container_of(work, struct nf_conntrack_net, ecache.dwork.work);
100-
struct netns_ct *ctnet = cnet->ecache.ct_net;
101-
int cpu, delay = -1;
102-
struct ct_pcpu *pcpu;
103-
104-
local_bh_disable();
105-
106-
for_each_possible_cpu(cpu) {
107-
enum retry_state ret;
108-
109-
pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
110-
111-
ret = ecache_work_evict_list(pcpu);
112-
113-
switch (ret) {
114-
case STATE_CONGESTED:
115-
delay = ECACHE_RETRY_WAIT;
116-
goto out;
117-
case STATE_RESTART:
118-
delay = 0;
119-
break;
120-
case STATE_DONE:
121-
break;
122-
}
100+
int ret, delay = -1;
101+
102+
ret = ecache_work_evict_list(cnet);
103+
switch (ret) {
104+
case STATE_CONGESTED:
105+
delay = ECACHE_RETRY_JIFFIES;
106+
break;
107+
case STATE_RESTART:
108+
delay = 0;
109+
break;
110+
case STATE_DONE:
111+
break;
123112
}
124113

125-
out:
126-
local_bh_enable();
127-
128-
ctnet->ecache_dwork_pending = delay > 0;
129114
if (delay >= 0)
130115
schedule_delayed_work(&cnet->ecache.dwork, delay);
131116
}
@@ -199,7 +184,6 @@ int nf_conntrack_eventmask_report(unsigned int events, struct nf_conn *ct,
199184
*/
200185
if (e->portid == 0 && portid != 0)
201186
e->portid = portid;
202-
e->state = NFCT_ECACHE_DESTROY_FAIL;
203187
}
204188

205189
return ret;
@@ -297,8 +281,10 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
297281
schedule_delayed_work(&cnet->ecache.dwork, HZ);
298282
net->ct.ecache_dwork_pending = true;
299283
} else if (state == NFCT_ECACHE_DESTROY_SENT) {
300-
net->ct.ecache_dwork_pending = false;
301-
mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
284+
if (!hlist_nulls_empty(&cnet->ecache.dying_list))
285+
mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
286+
else
287+
net->ct.ecache_dwork_pending = false;
302288
}
303289
}
304290

@@ -311,8 +297,9 @@ void nf_conntrack_ecache_pernet_init(struct net *net)
311297

312298
net->ct.sysctl_events = nf_ct_events;
313299

314-
cnet->ecache.ct_net = &net->ct;
315300
INIT_DELAYED_WORK(&cnet->ecache.dwork, ecache_work);
301+
INIT_HLIST_NULLS_HEAD(&cnet->ecache.dying_list, DYING_NULLS_VAL);
302+
spin_lock_init(&cnet->ecache.dying_lock);
316303

317304
BUILD_BUG_ON(__IPCT_MAX >= 16); /* e->ctmask is u16 */
318305
}

0 commit comments

Comments (0)