
Commit deed49d

lxin authored and davem330 committed
route: check and remove route cache when we get route
Since the gc of ipv4 routes was removed, cached exception routes have no chance of being removed: even after they have timed out they can still be used, because no code checks their expiry. Fix this by checking the expiry and removing the cached route when we look up a route.

Signed-off-by: Xin Long <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 619fe32 commit deed49d
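In short: fib_nh exception entries cache routes that carry an expiry time in dst.expires, but once the old GC was gone nothing on the lookup path ever honoured that expiry. The patch makes the lookup itself evict a stale entry. A minimal userspace sketch of that check-on-lookup idea, with a hypothetical cache_entry type and cache_lookup() helper (not the kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Hypothetical cached entry: expires == 0 means "no expiry set". */
struct cache_entry {
        time_t expires;
        const char *payload;
};

/* Return the cached entry only while it is fresh; evict it otherwise. */
static struct cache_entry *cache_lookup(struct cache_entry **slot, time_t now)
{
        struct cache_entry *e = *slot;

        if (e && e->expires && now > e->expires) {
                /* Stale: remove it instead of handing it back. */
                free(e);
                *slot = NULL;
                return NULL;
        }
        return e;
}

int main(void)
{
        struct cache_entry *slot = malloc(sizeof(*slot));

        slot->expires = time(NULL) - 5;      /* expired 5 seconds ago */
        slot->payload = "cached route";

        printf("lookup: %s\n",
               cache_lookup(&slot, time(NULL)) ? "hit" : "miss (stale entry evicted)");
        return 0;
}

The kernel change applies the same test, against jiffies and rth->dst.expires, in __mkroute_input() and __mkroute_output(), as the hunks below show.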

2 files changed: +64, -14 lines

include/net/ip_fib.h

Lines changed: 1 addition & 0 deletions
@@ -61,6 +61,7 @@ struct fib_nh_exception {
 	struct rtable __rcu	*fnhe_rth_input;
 	struct rtable __rcu	*fnhe_rth_output;
 	unsigned long		fnhe_stamp;
+	struct rcu_head		rcu;
 };
 
 struct fnhe_hash_bucket {

net/ipv4/route.c

Lines changed: 63 additions & 14 deletions
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
 
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
 /*
  *	Interface to generic destination cache.
  */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 			struct fib_nh *nh = &FIB_RES_NH(res);
 
 			update_or_create_fnhe(nh, fl4->daddr, new_gw,
-					      0, 0);
+					      0, jiffies + ip_rt_gc_timeout);
 		}
 		if (kill_route)
 			rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+	struct fnhe_hash_bucket *hash;
+	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+	u32 hval = fnhe_hashfun(daddr);
+
+	spin_lock_bh(&fnhe_lock);
+
+	hash = rcu_dereference_protected(nh->nh_exceptions,
+					 lockdep_is_held(&fnhe_lock));
+	hash += hval;
+
+	fnhe_p = &hash->chain;
+	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+	while (fnhe) {
+		if (fnhe->fnhe_daddr == daddr) {
+			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+			fnhe_flush_routes(fnhe);
+			kfree_rcu(fnhe, rcu);
+			break;
+		}
+		fnhe_p = &fnhe->fnhe_next;
+		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+						 lockdep_is_held(&fnhe_lock));
+	}
+
+	spin_unlock_bh(&fnhe_lock);
+}
+
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
 			   const struct fib_result *res,
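The new ip_del_fnhe() walks the nexthop's exception chain under fnhe_lock, splices the matching entry out with a pointer-to-pointer walk (fnhe_p), and defers the free with kfree_rcu() so concurrent RCU readers stay safe. A stripped-down sketch of just the unlink pattern, without the locking and RCU, using a hypothetical node type:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int key;
        struct node *next;
};

/* Unlink and free the first node with a matching key.  The pointer-to-
 * pointer walk avoids tracking a "previous" node: *pp is always the link
 * that points at the current node, so splicing is a single store.  This
 * mirrors the fnhe_p walk in ip_del_fnhe(), minus fnhe_lock and RCU. */
static void del_key(struct node **pp, int key)
{
        struct node *n;

        while ((n = *pp) != NULL) {
                if (n->key == key) {
                        *pp = n->next;          /* splice the node out */
                        free(n);
                        return;
                }
                pp = &n->next;
        }
}

int main(void)
{
        struct node *head = NULL, **tail = &head;

        for (int i = 1; i <= 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->key = i;
                n->next = NULL;
                *tail = n;
                tail = &n->next;
        }

        del_key(&head, 2);                      /* drop the middle node */

        for (struct node *n = head; n; n = n->next)
                printf("%d ", n->key);          /* prints: 1 3 */
        printf("\n");
        return 0;
}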
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
 
 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
 	if (do_cache) {
-		if (fnhe)
+		if (fnhe) {
 			rth = rcu_dereference(fnhe->fnhe_rth_input);
-		else
-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+			if (rth && rth->dst.expires &&
+			    time_after(jiffies, rth->dst.expires)) {
+				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+				fnhe = NULL;
+			} else {
+				goto rt_cache;
+			}
+		}
+
+		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
 
+rt_cache:
 		if (rt_cache_valid(rth)) {
 			skb_dst_set_noref(skb, &rth->dst);
 			goto out;
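The staleness test itself is rth->dst.expires && time_after(jiffies, rth->dst.expires): a zero expires means no expiry is set, and time_after() compares the free-running jiffies counter in a wraparound-safe way by looking at the sign of the difference. A small standalone demo of why that matters, with a simplified macro that omits the kernel's typecheck():

#include <stdio.h>

/* Wraparound-safe "has a passed b?" for free-running counters, in the
 * spirit of the kernel's time_after() (simplified: no typecheck()). */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        /* Pretend the counter wrapped shortly after the expiry stamp was
         * written: expires sits just below ULONG_MAX, and the counter has
         * since rolled over to a small value. */
        unsigned long expires = (unsigned long)-10;
        unsigned long jiffies = 5;

        printf("naive '>':    %s\n", jiffies > expires ? "expired" : "fresh");
        printf("time_after(): %s\n",
               time_after(jiffies, expires) ? "expired" : "fresh");
        return 0;
}

The output-route path below applies the same expiry check to fnhe_rth_output before deciding whether to reuse the cached dst.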
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 		struct fib_nh *nh = &FIB_RES_NH(*res);
 
 		fnhe = find_exception(nh, fl4->daddr);
-		if (fnhe)
+		if (fnhe) {
 			prth = &fnhe->fnhe_rth_output;
-		else {
-			if (unlikely(fl4->flowi4_flags &
-				     FLOWI_FLAG_KNOWN_NH &&
-				     !(nh->nh_gw &&
-				       nh->nh_scope == RT_SCOPE_LINK))) {
-				do_cache = false;
-				goto add;
+			rth = rcu_dereference(*prth);
+			if (rth && rth->dst.expires &&
+			    time_after(jiffies, rth->dst.expires)) {
+				ip_del_fnhe(nh, fl4->daddr);
+				fnhe = NULL;
+			} else {
+				goto rt_cache;
 			}
-			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		}
+
+		if (unlikely(fl4->flowi4_flags &
+			     FLOWI_FLAG_KNOWN_NH &&
+			     !(nh->nh_gw &&
+			       nh->nh_scope == RT_SCOPE_LINK))) {
+			do_cache = false;
+			goto add;
+		}
+		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 		rth = rcu_dereference(*prth);
+
+rt_cache:
 		if (rt_cache_valid(rth)) {
 			dst_hold(&rth->dst);
 			return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
 }
 
 #ifdef CONFIG_SYSCTL
-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_gc_elasticity __read_mostly = 8;
