Skip to content

Commit ff0df20

Browse files
committed
Florian Westphal says:

====================
netfilter fixes for net

1. On-demand overlap detection in 'rbtree' set can cause memory leaks.
   This is broken since 6.2.

2. An earlier fix in 6.4 to address an imbalance in refcounts during
   transaction error unwinding was incomplete, from Pablo Neira.

3. Disallow adding a rule to a deleted chain, also from Pablo.
   Broken since 5.9.

* tag 'nf-23-07-26' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
  netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR
  netfilter: nft_set_rbtree: fix overlap expiration walk
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
2 parents (2526612 + 0ebc106); commit ff0df20

File tree

3 files changed

+35
-17
lines changed

3 files changed

+35
-17
lines changed

net/netfilter/nf_tables_api.c

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -3811,8 +3811,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
38113811
NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
38123812
return PTR_ERR(chain);
38133813
}
3814-
if (nft_chain_is_bound(chain))
3815-
return -EOPNOTSUPP;
38163814

38173815
} else if (nla[NFTA_RULE_CHAIN_ID]) {
38183816
chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID],
@@ -3825,6 +3823,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
38253823
return -EINVAL;
38263824
}
38273825

3826+
if (nft_chain_is_bound(chain))
3827+
return -EOPNOTSUPP;
3828+
38283829
if (nla[NFTA_RULE_HANDLE]) {
38293830
handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
38303831
rule = __nft_rule_lookup(chain, handle);

net/netfilter/nft_immediate.c

Lines changed: 18 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -125,15 +125,27 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
125125
return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
126126
}
127127

128+
static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx,
129+
struct nft_chain *chain,
130+
enum nft_trans_phase phase)
131+
{
132+
struct nft_ctx chain_ctx;
133+
struct nft_rule *rule;
134+
135+
chain_ctx = *ctx;
136+
chain_ctx.chain = chain;
137+
138+
list_for_each_entry(rule, &chain->rules, list)
139+
nft_rule_expr_deactivate(&chain_ctx, rule, phase);
140+
}
141+
128142
static void nft_immediate_deactivate(const struct nft_ctx *ctx,
129143
const struct nft_expr *expr,
130144
enum nft_trans_phase phase)
131145
{
132146
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
133147
const struct nft_data *data = &priv->data;
134-
struct nft_ctx chain_ctx;
135148
struct nft_chain *chain;
136-
struct nft_rule *rule;
137149

138150
if (priv->dreg == NFT_REG_VERDICT) {
139151
switch (data->verdict.code) {
@@ -143,20 +155,17 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
143155
if (!nft_chain_binding(chain))
144156
break;
145157

146-
chain_ctx = *ctx;
147-
chain_ctx.chain = chain;
148-
149-
list_for_each_entry(rule, &chain->rules, list)
150-
nft_rule_expr_deactivate(&chain_ctx, rule, phase);
151-
152158
switch (phase) {
153159
case NFT_TRANS_PREPARE_ERROR:
154160
nf_tables_unbind_chain(ctx, chain);
155-
fallthrough;
161+
nft_deactivate_next(ctx->net, chain);
162+
break;
156163
case NFT_TRANS_PREPARE:
164+
nft_immediate_chain_deactivate(ctx, chain, phase);
157165
nft_deactivate_next(ctx->net, chain);
158166
break;
159167
default:
168+
nft_immediate_chain_deactivate(ctx, chain, phase);
160169
nft_chain_del(chain);
161170
chain->bound = false;
162171
nft_use_dec(&chain->table->use);

net/netfilter/nft_set_rbtree.c

Lines changed: 14 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
217217

218218
static int nft_rbtree_gc_elem(const struct nft_set *__set,
219219
struct nft_rbtree *priv,
220-
struct nft_rbtree_elem *rbe)
220+
struct nft_rbtree_elem *rbe,
221+
u8 genmask)
221222
{
222223
struct nft_set *set = (struct nft_set *)__set;
223224
struct rb_node *prev = rb_prev(&rbe->node);
224-
struct nft_rbtree_elem *rbe_prev = NULL;
225+
struct nft_rbtree_elem *rbe_prev;
225226
struct nft_set_gc_batch *gcb;
226227

227228
gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
228229
if (!gcb)
229230
return -ENOMEM;
230231

231-
/* search for expired end interval coming before this element. */
232+
/* search for end interval coming before this element.
233+
* end intervals don't carry a timeout extension, they
234+
* are coupled with the interval start element.
235+
*/
232236
while (prev) {
233237
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
234-
if (nft_rbtree_interval_end(rbe_prev))
238+
if (nft_rbtree_interval_end(rbe_prev) &&
239+
nft_set_elem_active(&rbe_prev->ext, genmask))
235240
break;
236241

237242
prev = rb_prev(prev);
238243
}
239244

240-
if (rbe_prev) {
245+
if (prev) {
246+
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
247+
241248
rb_erase(&rbe_prev->node, &priv->root);
242249
atomic_dec(&set->nelems);
250+
nft_set_gc_batch_add(gcb, rbe_prev);
243251
}
244252

245253
rb_erase(&rbe->node, &priv->root);
@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
321329

322330
/* perform garbage collection to avoid bogus overlap reports. */
323331
if (nft_set_elem_expired(&rbe->ext)) {
324-
err = nft_rbtree_gc_elem(set, priv, rbe);
332+
err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
325333
if (err < 0)
326334
return err;
327335

0 commit comments

Comments (0)