@@ -525,21 +525,6 @@ clean_from_lists(struct nf_conn *ct)
 	nf_ct_remove_expectations(ct);
 }
 
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_dying_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* add this conntrack to the (per cpu) dying list */
-	ct->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			     &pcpu->dying);
-	spin_unlock(&pcpu->lock);
-}
-
 /* must be called with local_bh_disable */
 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
 {
@@ -556,11 +541,11 @@ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* must be called with local_bh_disable */
-static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct)
 {
 	struct ct_pcpu *pcpu;
 
-	/* We overload first tuple to link into unconfirmed or dying list.*/
+	/* We overload first tuple to link into unconfirmed list.*/
 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
 
 	spin_lock(&pcpu->lock);
@@ -648,7 +633,8 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
 	 */
 	nf_ct_remove_expectations(ct);
 
-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+	if (unlikely(!nf_ct_is_confirmed(ct)))
+		nf_ct_del_from_unconfirmed_list(ct);
 
 	local_bh_enable();
 
@@ -686,7 +672,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 	local_bh_disable();
 
 	__nf_ct_delete_from_lists(ct);
-	nf_ct_add_to_dying_list(ct);
 
 	local_bh_enable();
 }
@@ -700,8 +685,6 @@ static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 				 &cnet->ecache.dying_list);
 	spin_unlock(&cnet->ecache.dying_lock);
-#else
-	nf_ct_add_to_dying_list(ct);
 #endif
 }
 
@@ -995,7 +978,6 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
 	struct nf_conn_tstamp *tstamp;
 
 	refcount_inc(&ct->ct_general.use);
-	ct->status |= IPS_CONFIRMED;
 
 	/* set conntrack timestamp, if enabled. */
 	tstamp = nf_conn_tstamp_find(ct);
@@ -1024,7 +1006,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
 		nf_conntrack_get(&ct->ct_general);
 
 		nf_ct_acct_merge(ct, ctinfo, loser_ct);
-		nf_ct_add_to_dying_list(loser_ct);
 		nf_ct_put(loser_ct);
 		nf_ct_set(skb, ct, ctinfo);
 
@@ -1157,7 +1138,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
 	return ret;
 
 drop:
-	nf_ct_add_to_dying_list(loser_ct);
 	NF_CT_STAT_INC(net, drop);
 	NF_CT_STAT_INC(net, insert_failed);
 	return NF_DROP;
@@ -1224,10 +1204,10 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+	nf_ct_del_from_unconfirmed_list(ct);
+	ct->status |= IPS_CONFIRMED;
 
 	if (unlikely(nf_ct_is_dying(ct))) {
-		nf_ct_add_to_dying_list(ct);
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
 	}
@@ -1251,7 +1231,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 			goto out;
 		if (chainlen++ > max_chainlen) {
 chaintoolong:
-			nf_ct_add_to_dying_list(ct);
 			NF_CT_STAT_INC(net, chaintoolong);
 			NF_CT_STAT_INC(net, insert_failed);
 			ret = NF_DROP;
@@ -2800,7 +2779,6 @@ void nf_conntrack_init_end(void)
  * We need to use special "null" values, not used in hash table
  */
 #define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
-#define DYING_NULLS_VAL		((1<<30)+1)
 
 int nf_conntrack_init_net(struct net *net)
 {
@@ -2821,7 +2799,6 @@ int nf_conntrack_init_net(struct net *net)
 
 		spin_lock_init(&pcpu->lock);
 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
-		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
 	}
 
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);