@@ -88,8 +88,10 @@ struct ip6_tnl_net {
 	struct ip6_tnl **tnls[2];
 };
 
-/* lock for the tunnel lists */
-static DEFINE_RWLOCK(ip6_tnl_lock);
+/*
+ * Locking : hash tables are protected by RCU and a spinlock
+ */
+static DEFINE_SPINLOCK(ip6_tnl_lock);
 
 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
 {
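The hunk above is the heart of the conversion: the global rwlock protecting the tunnel hash tables becomes a plain spinlock that only serializes writers, while lookups move to RCU. As a rough illustration of that split (a minimal sketch with hypothetical names, not code from ip6_tunnel.c), a reader walks a bucket with rcu_dereference() inside an RCU read-side critical section, and a writer publishes a new entry with rcu_assign_pointer() while holding the spinlock:

/* Minimal sketch of the pattern; names are hypothetical, not from ip6_tunnel.c. */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct entry {
	int		key;
	struct entry	*next;		/* RCU-protected list linkage */
};

static struct entry *bucket;		/* one hash bucket head */
static DEFINE_SPINLOCK(bucket_lock);	/* serializes writers only */

/* Reader side: caller must hold rcu_read_lock(). */
static struct entry *entry_lookup(int key)
{
	struct entry *e;

	for (e = rcu_dereference(bucket); e; e = rcu_dereference(e->next))
		if (e->key == key)
			return e;
	return NULL;
}

/* Writer side: the spinlock keeps writers apart, rcu_assign_pointer() publishes. */
static void entry_link(struct entry *e)
{
	spin_lock_bh(&bucket_lock);
	e->next = bucket;
	rcu_assign_pointer(bucket, e);
	spin_unlock_bh(&bucket_lock);
}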
@@ -130,6 +132,9 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
  *   else %NULL
  **/
 
+#define for_each_ip6_tunnel_rcu(start) \
+	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
 {
@@ -138,13 +143,14 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
-	for (t = ip6n->tnls_r_l[h0 ^ h1]; t; t = t->next) {
+	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
 		    (t->dev->flags & IFF_UP))
 			return t;
 	}
-	if ((t = ip6n->tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
+	t = rcu_dereference(ip6n->tnls_wc[0]);
+	if (t && (t->dev->flags & IFF_UP))
 		return t;
 
 	return NULL;
@@ -186,10 +192,10 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 {
 	struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
 
+	spin_lock_bh(&ip6_tnl_lock);
 	t->next = *tp;
-	write_lock_bh(&ip6_tnl_lock);
-	*tp = t;
-	write_unlock_bh(&ip6_tnl_lock);
+	rcu_assign_pointer(*tp, t);
+	spin_unlock_bh(&ip6_tnl_lock);
 }
 
 /**
@@ -204,9 +210,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
 
 	for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
 		if (t == *tp) {
-			write_lock_bh(&ip6_tnl_lock);
+			spin_lock_bh(&ip6_tnl_lock);
 			*tp = t->next;
-			write_unlock_bh(&ip6_tnl_lock);
+			spin_unlock_bh(&ip6_tnl_lock);
 			break;
 		}
 	}
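As in ip6_tnl_link(), the unlink path above only takes the spinlock around the store that actually unpublishes the entry (the traversal outside the lock is presumably fine because this driver's writers are already serialized higher up). The general point worth keeping in mind is that removing a node from an RCU-protected list is not enough on its own: the memory must not be reused until a grace period has elapsed, because readers may still be traversing it. A hedged sketch of that discipline, continuing the hypothetical example above:

/* Minimal sketch; hypothetical names, continuing the example above.
 * For simplicity the whole traversal runs under the writer lock. */
#include <linux/slab.h>		/* kfree() */

static void entry_unlink(struct entry *victim)
{
	struct entry **ep;

	spin_lock_bh(&bucket_lock);
	for (ep = &bucket; *ep; ep = &(*ep)->next) {
		if (*ep == victim) {
			*ep = victim->next;	/* unpublish the node */
			break;
		}
	}
	spin_unlock_bh(&bucket_lock);

	/*
	 * Readers that found @victim before the store above may still be
	 * using it; wait for a grace period before freeing.  call_rcu()
	 * would work as well for an asynchronous variant.
	 */
	synchronize_rcu();
	kfree(victim);
}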
@@ -313,9 +319,9 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
 	if (dev == ip6n->fb_tnl_dev) {
-		write_lock_bh(&ip6_tnl_lock);
+		spin_lock_bh(&ip6_tnl_lock);
 		ip6n->tnls_wc[0] = NULL;
-		write_unlock_bh(&ip6_tnl_lock);
+		spin_unlock_bh(&ip6_tnl_lock);
 	} else {
 		ip6_tnl_unlink(ip6n, t);
 	}
@@ -409,7 +415,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	   in trouble since we might need the source address for further
 	   processing of the error. */
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
 				&ipv6h->saddr)) == NULL)
 		goto out;
@@ -482,7 +488,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 		*msg = rel_msg;
 
 out:
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 	return err;
 }
 
@@ -693,23 +699,23 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 	struct ip6_tnl *t;
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
-	read_lock(&ip6_tnl_lock);
+	rcu_read_lock();
 
 	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
 				&ipv6h->daddr)) != NULL) {
 		if (t->parms.proto != ipproto && t->parms.proto != 0) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 
 		if (!ip6_tnl_rcv_ctl(t)) {
 			t->dev->stats.rx_dropped++;
-			read_unlock(&ip6_tnl_lock);
+			rcu_read_unlock();
 			goto discard;
 		}
 		secpath_reset(skb);
@@ -727,10 +733,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		t->dev->stats.rx_packets++;
 		t->dev->stats.rx_bytes += skb->len;
 		netif_rx(skb);
-		read_unlock(&ip6_tnl_lock);
+		rcu_read_unlock();
 		return 0;
 	}
-	read_unlock(&ip6_tnl_lock);
+	rcu_read_unlock();
 
 	return 1;
 
 discard:
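On the error and receive paths the change is mechanical: read_lock()/read_unlock() on the old rwlock become rcu_read_lock()/rcu_read_unlock(), and the looked-up tunnel is only used inside that read-side critical section. Continuing the hypothetical sketch from above, a receive-style reader would look roughly like this:

/* Minimal sketch; hypothetical names, continuing the example above. */
static int entry_rcv(int key)
{
	struct entry *e;

	rcu_read_lock();
	e = entry_lookup(key);		/* lookup requires rcu_read_lock() held */
	if (!e) {
		rcu_read_unlock();
		return 1;		/* not ours, let someone else handle it */
	}

	/*
	 * Use @e only here: it is guaranteed to stay around only for as
	 * long as the read-side critical section lasts.
	 */

	rcu_read_unlock();
	return 0;
}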