Skip to content

Commit 4c4d11b

Browse files
committed
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says: ==================== Netfilter fixes for net The following patchset contains two Netfilter fixes for your net tree, they are: 1) Fix NAT compilation with UP, from Geert Uytterhoeven. 2) Fix incorrect number of entries when dumping a set, from Vishwanath Pai. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 02388bf + 7f4f7dd commit 4c4d11b

File tree

2 files changed

+19
-7
lines changed

2 files changed

+19
-7
lines changed

net/netfilter/ipset/ip_set_hash_gen.h

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1041,12 +1041,24 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
10411041
static int
10421042
mtype_head(struct ip_set *set, struct sk_buff *skb)
10431043
{
1044-
const struct htype *h = set->data;
1044+
struct htype *h = set->data;
10451045
const struct htable *t;
10461046
struct nlattr *nested;
10471047
size_t memsize;
10481048
u8 htable_bits;
10491049

1050+
/* If any members have expired, set->elements will be wrong
1051+
* mytype_expire function will update it with the right count.
1052+
* we do not hold set->lock here, so grab it first.
1053+
* set->elements can still be incorrect in the case of a huge set,
1054+
* because elements might time out during the listing.
1055+
*/
1056+
if (SET_WITH_TIMEOUT(set)) {
1057+
spin_lock_bh(&set->lock);
1058+
mtype_expire(set, h);
1059+
spin_unlock_bh(&set->lock);
1060+
}
1061+
10501062
rcu_read_lock_bh();
10511063
t = rcu_dereference_bh_nfnl(h->table);
10521064
memsize = mtype_ahash_memsize(h, t) + set->ext_size;

net/netfilter/nf_nat_core.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct,
429429

430430
srchash = hash_by_src(net,
431431
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
432-
lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
432+
lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
433433
spin_lock_bh(lock);
434434
hlist_add_head_rcu(&ct->nat_bysource,
435435
&nf_nat_bysource[srchash]);
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
532532
unsigned int h;
533533

534534
h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
535-
spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
535+
spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
536536
hlist_del_rcu(&ct->nat_bysource);
537-
spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
537+
spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
538538
}
539539

540540
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void)
807807

808808
/* Leave them the same for the moment. */
809809
nf_nat_htable_size = nf_conntrack_htable_size;
810-
if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
811-
nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
810+
if (nf_nat_htable_size < CONNTRACK_LOCKS)
811+
nf_nat_htable_size = CONNTRACK_LOCKS;
812812

813813
nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
814814
if (!nf_nat_bysource)
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void)
821821
return ret;
822822
}
823823

824-
for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
824+
for (i = 0; i < CONNTRACK_LOCKS; i++)
825825
spin_lock_init(&nf_nat_locks[i]);
826826

827827
nf_ct_helper_expectfn_register(&follow_master_nat);

0 commit comments

Comments
 (0)