@@ -74,7 +74,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash);
 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
-static __read_mostly seqcount_t nf_conntrack_generation;
 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
@@ -162,6 +161,7 @@ static void nf_conntrack_all_unlock(void)
 
 unsigned int nf_conntrack_htable_size __read_mostly;
 unsigned int nf_conntrack_max __read_mostly;
+seqcount_t nf_conntrack_generation __read_mostly;
 
 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
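Note: nf_conntrack_generation changes from a static to a file-global (still __read_mostly) seqcount, so the hash-table snapshot helper used in the hunks below can reference it from outside this compilation unit's static scope. The matching write side lives in the hash-resize path, which is not part of this excerpt; as a rough sketch of how a writer pairs with this seqcount (publish_new_table, new_hash and new_size are hypothetical names, not from the patch):

	/* Hedged sketch: how the (unshown) resize path is expected to publish a
	 * new bucket array so readers never see a mismatched pointer/size pair.
	 */
	static void publish_new_table(struct hlist_nulls_head *new_hash,
				      unsigned int new_size)
	{
		write_seqcount_begin(&nf_conntrack_generation);
		nf_conntrack_hash = new_hash;
		nf_conntrack_htable_size = new_size;
		write_seqcount_end(&nf_conntrack_generation);
	}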
@@ -478,23 +478,6 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 	       net_eq(net, nf_ct_net(ct));
 }
 
-/* must be called with rcu read lock held */
-void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
-{
-	struct hlist_nulls_head *hptr;
-	unsigned int sequence, hsz;
-
-	do {
-		sequence = read_seqcount_begin(&nf_conntrack_generation);
-		hsz = nf_conntrack_htable_size;
-		hptr = nf_conntrack_hash;
-	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
-
-	*hash = hptr;
-	*hsize = hsz;
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_get_ht);
-
 /*
  * Warning :
  * - Caller must take a reference on returned object
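The exported out-of-line nf_conntrack_get_ht() is removed here, yet the hunks below still call it, so the helper must now be provided from somewhere outside this excerpt. Its contract is unchanged: under the RCU read lock, take one seqcount-consistent snapshot of the bucket-array pointer and the table size. A sketch of that read side, assuming it reappears as a static inline in a shared header (the actual destination is not shown in this diff):

	/* Sketch only; mirrors the body deleted above. Must be called with the
	 * RCU read lock held.
	 */
	static inline void
	nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
	{
		struct hlist_nulls_head *hptr;
		unsigned int sequence, hsz;

		do {
			sequence = read_seqcount_begin(&nf_conntrack_generation);
			hsz = nf_conntrack_htable_size;
			hptr = nf_conntrack_hash;
		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));

		*hash = hptr;
		*hsize = hsz;
	}

If either global changes mid-read, read_seqcount_retry() forces another pass, so callers always get a pointer and a size from the same generation of the table.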
@@ -507,14 +490,11 @@ ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_head *ct_hash;
 	struct hlist_nulls_node *n;
-	unsigned int bucket, sequence;
+	unsigned int bucket, hsize;
 
 begin:
-	do {
-		sequence = read_seqcount_begin(&nf_conntrack_generation);
-		bucket = scale_hash(hash);
-		ct_hash = nf_conntrack_hash;
-	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	bucket = reciprocal_scale(hash, hsize);
 
 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
 		if (nf_ct_key_equal(h, tuple, zone, net)) {
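In ____nf_conntrack_find() the open-coded seqcount loop becomes a call to the shared snapshot helper, and the bucket is derived from the snapshotted hsize via reciprocal_scale() instead of scale_hash(), which (per the rest of the file) folds against the live nf_conntrack_htable_size. reciprocal_scale() is the stock kernel helper (include/linux/kernel.h in this era) that maps a 32-bit value into [0, ep_ro) with a multiply-and-shift rather than a modulo:

	/* For reference: multiply-and-shift bucket mapping; val is the 32-bit
	 * tuple hash, ep_ro the number of buckets, result is in [0, ep_ro).
	 */
	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		return (u32)(((u64) val * ep_ro) >> 32);
	}

Because hsize comes from the same snapshot as ct_hash, the computed bucket always indexes the array that is actually being walked, even if a resize lands in between.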
@@ -820,18 +800,15 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_head *ct_hash;
-	unsigned int hash, sequence;
+	unsigned int hash, hsize;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;
 
 	zone = nf_ct_zone(ignored_conntrack);
 
 	rcu_read_lock();
-	do {
-		sequence = read_seqcount_begin(&nf_conntrack_generation);
-		hash = hash_conntrack(net, tuple);
-		ct_hash = nf_conntrack_hash;
-	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+	nf_conntrack_get_ht(&ct_hash, &hsize);
+	hash = __hash_conntrack(net, tuple, hsize);
 
 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
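nf_conntrack_tuple_taken() gets the same treatment: the tuple is now hashed with __hash_conntrack() against the snapshotted hsize rather than with hash_conntrack(), which uses the live table size. __hash_conntrack() is defined elsewhere in this file and is not part of this excerpt; presumably it is along these lines (hash_conntrack_raw() being the existing netns-aware tuple hash):

	/* Assumed shape of the size-explicit hash, shown only for context. */
	static u32 __hash_conntrack(const struct net *net,
				    const struct nf_conntrack_tuple *tuple,
				    unsigned int size)
	{
		return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
	}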
@@ -897,14 +874,11 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 
 	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
 		struct hlist_nulls_head *ct_hash;
-		unsigned hash, sequence, drops;
+		unsigned int hash, hsize, drops;
 
 		rcu_read_lock();
-		do {
-			sequence = read_seqcount_begin(&nf_conntrack_generation);
-			hash = scale_hash(_hash++);
-			ct_hash = nf_conntrack_hash;
-		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+		nf_conntrack_get_ht(&ct_hash, &hsize);
+		hash = reciprocal_scale(_hash++, hsize);
 
 		drops = early_drop_list(net, &ct_hash[hash]);
 		rcu_read_unlock();