Skip to content

Commit 60dbb01

Browse files
committed
Merge branch 'master' of git://1984.lsi.us.es/net-2.6
2 parents 4b0ef1f + 2f46e07 commit 60dbb01

File tree

7 files changed

+55
-109
lines changed

7 files changed

+55
-109
lines changed

include/linux/if_bridge.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ struct __fdb_entry {
103103

104104
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
105105

106-
typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
106+
typedef int br_should_route_hook_t(struct sk_buff *skb);
107107
extern br_should_route_hook_t __rcu *br_should_route_hook;
108108

109109
#endif

include/linux/netfilter/x_tables.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
472472
* necessary for reading the counters.
473473
*/
474474
struct xt_info_lock {
475-
spinlock_t lock;
475+
seqlock_t lock;
476476
unsigned char readers;
477477
};
478478
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,15 +497,15 @@ static inline void xt_info_rdlock_bh(void)
497497
local_bh_disable();
498498
lock = &__get_cpu_var(xt_info_locks);
499499
if (likely(!lock->readers++))
500-
spin_lock(&lock->lock);
500+
write_seqlock(&lock->lock);
501501
}
502502

503503
static inline void xt_info_rdunlock_bh(void)
504504
{
505505
struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
506506

507507
if (likely(!--lock->readers))
508-
spin_unlock(&lock->lock);
508+
write_sequnlock(&lock->lock);
509509
local_bh_enable();
510510
}
511511

@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
516516
*/
517517
static inline void xt_info_wrlock(unsigned int cpu)
518518
{
519-
spin_lock(&per_cpu(xt_info_locks, cpu).lock);
519+
write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
520520
}
521521

522522
static inline void xt_info_wrunlock(unsigned int cpu)
523523
{
524-
spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
524+
write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
525525
}
526526

527527
/*

net/ipv4/netfilter/arp_tables.c

Lines changed: 14 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
710710
struct arpt_entry *iter;
711711
unsigned int cpu;
712712
unsigned int i;
713-
unsigned int curcpu = get_cpu();
714-
715-
/* Instead of clearing (by a previous call to memset())
716-
* the counters and using adds, we set the counters
717-
* with data used by 'current' CPU
718-
*
719-
* Bottom half has to be disabled to prevent deadlock
720-
* if new softirq were to run and call ipt_do_table
721-
*/
722-
local_bh_disable();
723-
i = 0;
724-
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
725-
SET_COUNTER(counters[i], iter->counters.bcnt,
726-
iter->counters.pcnt);
727-
++i;
728-
}
729-
local_bh_enable();
730-
/* Processing counters from other cpus, we can let bottom half enabled,
731-
* (preemption is disabled)
732-
*/
733713

734714
for_each_possible_cpu(cpu) {
735-
if (cpu == curcpu)
736-
continue;
715+
seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
716+
737717
i = 0;
738-
local_bh_disable();
739-
xt_info_wrlock(cpu);
740718
xt_entry_foreach(iter, t->entries[cpu], t->size) {
741-
ADD_COUNTER(counters[i], iter->counters.bcnt,
742-
iter->counters.pcnt);
719+
u64 bcnt, pcnt;
720+
unsigned int start;
721+
722+
do {
723+
start = read_seqbegin(lock);
724+
bcnt = iter->counters.bcnt;
725+
pcnt = iter->counters.pcnt;
726+
} while (read_seqretry(lock, start));
727+
728+
ADD_COUNTER(counters[i], bcnt, pcnt);
743729
++i;
744730
}
745-
xt_info_wrunlock(cpu);
746-
local_bh_enable();
747731
}
748-
put_cpu();
749732
}
750733

751734
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
759742
* about).
760743
*/
761744
countersize = sizeof(struct xt_counters) * private->number;
762-
counters = vmalloc(countersize);
745+
counters = vzalloc(countersize);
763746

764747
if (counters == NULL)
765748
return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
1007990
struct arpt_entry *iter;
1008991

1009992
ret = 0;
1010-
counters = vmalloc(num_counters * sizeof(struct xt_counters));
993+
counters = vzalloc(num_counters * sizeof(struct xt_counters));
1011994
if (!counters) {
1012995
ret = -ENOMEM;
1013996
goto out;

net/ipv4/netfilter/ip_tables.c

Lines changed: 14 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
884884
struct ipt_entry *iter;
885885
unsigned int cpu;
886886
unsigned int i;
887-
unsigned int curcpu = get_cpu();
888-
889-
/* Instead of clearing (by a previous call to memset())
890-
* the counters and using adds, we set the counters
891-
* with data used by 'current' CPU.
892-
*
893-
* Bottom half has to be disabled to prevent deadlock
894-
* if new softirq were to run and call ipt_do_table
895-
*/
896-
local_bh_disable();
897-
i = 0;
898-
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
899-
SET_COUNTER(counters[i], iter->counters.bcnt,
900-
iter->counters.pcnt);
901-
++i;
902-
}
903-
local_bh_enable();
904-
/* Processing counters from other cpus, we can let bottom half enabled,
905-
* (preemption is disabled)
906-
*/
907887

908888
for_each_possible_cpu(cpu) {
909-
if (cpu == curcpu)
910-
continue;
889+
seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
890+
911891
i = 0;
912-
local_bh_disable();
913-
xt_info_wrlock(cpu);
914892
xt_entry_foreach(iter, t->entries[cpu], t->size) {
915-
ADD_COUNTER(counters[i], iter->counters.bcnt,
916-
iter->counters.pcnt);
893+
u64 bcnt, pcnt;
894+
unsigned int start;
895+
896+
do {
897+
start = read_seqbegin(lock);
898+
bcnt = iter->counters.bcnt;
899+
pcnt = iter->counters.pcnt;
900+
} while (read_seqretry(lock, start));
901+
902+
ADD_COUNTER(counters[i], bcnt, pcnt);
917903
++i; /* macro does multi eval of i */
918904
}
919-
xt_info_wrunlock(cpu);
920-
local_bh_enable();
921905
}
922-
put_cpu();
923906
}
924907

925908
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
932915
(other than comefrom, which userspace doesn't care
933916
about). */
934917
countersize = sizeof(struct xt_counters) * private->number;
935-
counters = vmalloc(countersize);
918+
counters = vzalloc(countersize);
936919

937920
if (counters == NULL)
938921
return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
12031186
struct ipt_entry *iter;
12041187

12051188
ret = 0;
1206-
counters = vmalloc(num_counters * sizeof(struct xt_counters));
1189+
counters = vzalloc(num_counters * sizeof(struct xt_counters));
12071190
if (!counters) {
12081191
ret = -ENOMEM;
12091192
goto out;

net/ipv6/netfilter/ip6_tables.c

Lines changed: 14 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
897897
struct ip6t_entry *iter;
898898
unsigned int cpu;
899899
unsigned int i;
900-
unsigned int curcpu = get_cpu();
901-
902-
/* Instead of clearing (by a previous call to memset())
903-
* the counters and using adds, we set the counters
904-
* with data used by 'current' CPU
905-
*
906-
* Bottom half has to be disabled to prevent deadlock
907-
* if new softirq were to run and call ipt_do_table
908-
*/
909-
local_bh_disable();
910-
i = 0;
911-
xt_entry_foreach(iter, t->entries[curcpu], t->size) {
912-
SET_COUNTER(counters[i], iter->counters.bcnt,
913-
iter->counters.pcnt);
914-
++i;
915-
}
916-
local_bh_enable();
917-
/* Processing counters from other cpus, we can let bottom half enabled,
918-
* (preemption is disabled)
919-
*/
920900

921901
for_each_possible_cpu(cpu) {
922-
if (cpu == curcpu)
923-
continue;
902+
seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
903+
924904
i = 0;
925-
local_bh_disable();
926-
xt_info_wrlock(cpu);
927905
xt_entry_foreach(iter, t->entries[cpu], t->size) {
928-
ADD_COUNTER(counters[i], iter->counters.bcnt,
929-
iter->counters.pcnt);
906+
u64 bcnt, pcnt;
907+
unsigned int start;
908+
909+
do {
910+
start = read_seqbegin(lock);
911+
bcnt = iter->counters.bcnt;
912+
pcnt = iter->counters.pcnt;
913+
} while (read_seqretry(lock, start));
914+
915+
ADD_COUNTER(counters[i], bcnt, pcnt);
930916
++i;
931917
}
932-
xt_info_wrunlock(cpu);
933-
local_bh_enable();
934918
}
935-
put_cpu();
936919
}
937920

938921
static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
945928
(other than comefrom, which userspace doesn't care
946929
about). */
947930
countersize = sizeof(struct xt_counters) * private->number;
948-
counters = vmalloc(countersize);
931+
counters = vzalloc(countersize);
949932

950933
if (counters == NULL)
951934
return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
12161199
struct ip6t_entry *iter;
12171200

12181201
ret = 0;
1219-
counters = vmalloc(num_counters * sizeof(struct xt_counters));
1202+
counters = vzalloc(num_counters * sizeof(struct xt_counters));
12201203
if (!counters) {
12211204
ret = -ENOMEM;
12221205
goto out;

net/netfilter/nf_conntrack_netlink.c

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
645645
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
646646
u_int8_t l3proto = nfmsg->nfgen_family;
647647

648-
rcu_read_lock();
648+
spin_lock_bh(&nf_conntrack_lock);
649649
last = (struct nf_conn *)cb->args[1];
650650
for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
651651
restart:
652-
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
652+
hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
653653
hnnode) {
654654
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
655655
continue;
656656
ct = nf_ct_tuplehash_to_ctrack(h);
657-
if (!atomic_inc_not_zero(&ct->ct_general.use))
658-
continue;
659657
/* Dump entries of a given L3 protocol number.
660658
* If it is not specified, ie. l3proto == 0,
661659
* then dump everything. */
662660
if (l3proto && nf_ct_l3num(ct) != l3proto)
663-
goto releasect;
661+
continue;
664662
if (cb->args[1]) {
665663
if (ct != last)
666-
goto releasect;
664+
continue;
667665
cb->args[1] = 0;
668666
}
669667
if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,16 +679,14 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
681679
if (acct)
682680
memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
683681
}
684-
releasect:
685-
nf_ct_put(ct);
686682
}
687683
if (cb->args[1]) {
688684
cb->args[1] = 0;
689685
goto restart;
690686
}
691687
}
692688
out:
693-
rcu_read_unlock();
689+
spin_unlock_bh(&nf_conntrack_lock);
694690
if (last)
695691
nf_ct_put(last);
696692

net/netfilter/x_tables.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
13251325

13261326
for_each_possible_cpu(i) {
13271327
struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
1328-
spin_lock_init(&lock->lock);
1328+
1329+
seqlock_init(&lock->lock);
13291330
lock->readers = 0;
13301331
}
13311332

0 commit comments

Comments (0)