@@ -834,67 +834,66 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static unsigned int early_drop_list(struct net *net,
+				    struct hlist_nulls_head *head)
 {
-	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
-	struct nf_conn *tmp;
 	struct hlist_nulls_node *n;
-	unsigned int i, hash, sequence;
-	struct nf_conn *ct = NULL;
-	spinlock_t *lockp;
-	bool ret = false;
+	unsigned int drops = 0;
+	struct nf_conn *tmp;
 
-	i = 0;
+	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
+		tmp = nf_ct_tuplehash_to_ctrack(h);
 
-	local_bh_disable();
-restart:
-	sequence = read_seqcount_begin(&nf_conntrack_generation);
-	for (; i < NF_CT_EVICTION_RANGE; i++) {
-		hash = scale_hash(_hash++);
-		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
-		nf_conntrack_lock(lockp);
-		if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
-			spin_unlock(lockp);
-			goto restart;
-		}
-		hlist_nulls_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
-					       hnnode) {
-			tmp = nf_ct_tuplehash_to_ctrack(h);
-
-			if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
-			    !net_eq(nf_ct_net(tmp), net) ||
-			    nf_ct_is_dying(tmp))
-				continue;
-
-			if (atomic_inc_not_zero(&tmp->ct_general.use)) {
-				ct = tmp;
-				break;
-			}
-		}
+		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
+		    !net_eq(nf_ct_net(tmp), net) ||
+		    nf_ct_is_dying(tmp))
+			continue;
 
-		spin_unlock(lockp);
-		if (ct)
-			break;
+		if (!atomic_inc_not_zero(&tmp->ct_general.use))
+			continue;
+
+		/* kill only if still in same netns -- might have moved due to
+		 * SLAB_DESTROY_BY_RCU rules.
+		 *
+		 * We steal the timer reference.  If that fails timer has
+		 * already fired or someone else deleted it. Just drop ref
+		 * and move to next entry.
+		 */
+		if (net_eq(nf_ct_net(tmp), net) &&
+		    nf_ct_is_confirmed(tmp) &&
+		    del_timer(&tmp->timeout) &&
+		    nf_ct_delete(tmp, 0, 0))
+			drops++;
+
+		nf_ct_put(tmp);
 	}
 
-	local_bh_enable();
+	return drops;
+}
 
-	if (!ct)
-		return false;
+static noinline int early_drop(struct net *net, unsigned int _hash)
+{
+	unsigned int i;
 
-	/* kill only if in same netns -- might have moved due to
-	 * SLAB_DESTROY_BY_RCU rules
-	 */
-	if (net_eq(nf_ct_net(ct), net) && del_timer(&ct->timeout)) {
-		if (nf_ct_delete(ct, 0, 0)) {
-			NF_CT_STAT_INC_ATOMIC(net, early_drop);
-			ret = true;
+	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
+		struct hlist_nulls_head *ct_hash;
+		unsigned hash, sequence, drops;
+
+		do {
+			sequence = read_seqcount_begin(&nf_conntrack_generation);
+			hash = scale_hash(_hash++);
+			ct_hash = nf_conntrack_hash;
+		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+
+		drops = early_drop_list(net, &ct_hash[hash]);
+		if (drops) {
+			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
+			return true;
 		}
 	}
 
-	nf_ct_put(ct);
-	return ret;
+	return false;
 }
 
 static struct nf_conn *
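For reference, the reworked early_drop() above keys on the seqcount snapshot-and-retry pattern: it samples nf_conntrack_generation with read_seqcount_begin(), snapshots the table pointer and bucket index, and redoes the snapshot via read_seqcount_retry() if a resize changed the generation in between. Below is a minimal userspace sketch (C11) of that same shape, not kernel code; everything named toy_* plus generation, table, TABLE_SIZE, and the fixed key 12345 are illustrative assumptions, while read_seqcount_begin()/read_seqcount_retry(), scale_hash(), and nf_conntrack_generation are the real kernel names the diff uses.

/* toy_seqcount.c -- illustrative sketch only; toy_* names are invented,
 * not kernel APIs.  Mirrors the do/while snapshot in the new early_drop().
 */
#include <stdatomic.h>
#include <stdio.h>

#define TABLE_SIZE 8U

static _Atomic unsigned int generation;   /* even: stable, odd: resize in flight */
static int table[TABLE_SIZE];             /* stand-in for the conntrack hash table */

static unsigned int toy_read_begin(void)
{
	unsigned int seq;

	/* Wait out an in-flight writer, then remember the generation we saw. */
	do {
		seq = atomic_load_explicit(&generation, memory_order_acquire);
	} while (seq & 1);

	return seq;
}

static int toy_read_retry(unsigned int seq)
{
	/* Order the data reads above against the generation re-check. */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&generation, memory_order_relaxed) != seq;
}

int main(void)
{
	unsigned int seq, bucket;
	int *snapshot;

	/* Same shape as the new early_drop(): take a consistent snapshot of
	 * {table pointer, bucket index}, retry if a resizer ran meanwhile. */
	do {
		seq = toy_read_begin();
		bucket = 12345U % TABLE_SIZE;   /* stands in for scale_hash(_hash++) */
		snapshot = table;
	} while (toy_read_retry(seq));

	printf("walk bucket %u of table at %p\n", bucket, (void *)snapshot);
	return 0;
}

Built with e.g. cc -std=c11 toy_seqcount.c, the demo runs the loop once since no writer exists; the retry path only matters when a concurrent resizer bumps the generation, which is exactly the case the kernel code guards against after dropping the per-bucket spinlocks.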