@@ -80,6 +80,27 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
 }
 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
 
+static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+{
+	/* Release those devices we held, or Alexey will kill me. */
+	if (entry->indev)
+		dev_put(entry->indev);
+	if (entry->outdev)
+		dev_put(entry->outdev);
+#ifdef CONFIG_BRIDGE_NETFILTER
+	if (entry->skb->nf_bridge) {
+		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
+
+		if (nf_bridge->physindev)
+			dev_put(nf_bridge->physindev);
+		if (nf_bridge->physoutdev)
+			dev_put(nf_bridge->physoutdev);
+	}
+#endif
+	/* Drop reference to owner of hook which queued us. */
+	module_put(entry->elem->owner);
+}
+
 /*
  * Any packet that leaves via this function must come back
  * through nf_reinject().
@@ -93,10 +114,10 @@ static int __nf_queue(struct sk_buff *skb,
 		      unsigned int queuenum)
 {
 	int status;
-	struct nf_queue_entry *entry;
+	struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
-	struct net_device *physindev = NULL;
-	struct net_device *physoutdev = NULL;
+	struct net_device *physindev;
+	struct net_device *physoutdev;
 #endif
 	struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
@@ -105,28 +126,16 @@ static int __nf_queue(struct sk_buff *skb,
 	rcu_read_lock();
 
 	qh = rcu_dereference(queue_handler[pf]);
-	if (!qh) {
-		rcu_read_unlock();
-		kfree_skb(skb);
-		return 1;
-	}
+	if (!qh)
+		goto err_unlock;
 
 	afinfo = nf_get_afinfo(pf);
-	if (!afinfo) {
-		rcu_read_unlock();
-		kfree_skb(skb);
-		return 1;
-	}
+	if (!afinfo)
+		goto err_unlock;
 
 	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-	if (!entry) {
-		if (net_ratelimit())
-			printk(KERN_ERR "OOM queueing packet %p\n",
-			       skb);
-		rcu_read_unlock();
-		kfree_skb(skb);
-		return 1;
-	}
+	if (!entry)
+		goto err_unlock;
 
 	*entry = (struct nf_queue_entry) {
 		.skb	= skb,
@@ -166,25 +175,18 @@ static int __nf_queue(struct sk_buff *skb,
 	rcu_read_unlock();
 
 	if (status < 0) {
-		/* James M doesn't say fuck enough. */
-		if (indev)
-			dev_put(indev);
-		if (outdev)
-			dev_put(outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-		if (physindev)
-			dev_put(physindev);
-		if (physoutdev)
-			dev_put(physoutdev);
-#endif
-		module_put(entry->elem->owner);
-		kfree(entry);
-		kfree_skb(skb);
-
-		return 1;
+		nf_queue_entry_release_refs(entry);
+		goto err;
 	}
 
 	return 1;
+
+err_unlock:
+	rcu_read_unlock();
+err:
+	kfree_skb(skb);
+	kfree(entry);
+	return 1;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -235,22 +237,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 
 	rcu_read_lock();
 
-	/* Release those devices we held, or Alexey will kill me. */
-	if (entry->indev)
-		dev_put(entry->indev);
-	if (entry->outdev)
-		dev_put(entry->outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
-	if (skb->nf_bridge) {
-		if (skb->nf_bridge->physindev)
-			dev_put(skb->nf_bridge->physindev);
-		if (skb->nf_bridge->physoutdev)
-			dev_put(skb->nf_bridge->physoutdev);
-	}
-#endif
-
-	/* Drop reference to owner of hook which queued us. */
-	module_put(entry->elem->owner);
+	nf_queue_entry_release_refs(entry);
 
 	/* Continue traversal iff userspace said ok... */
 	if (verdict == NF_REPEAT) {
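
For illustration only, here is a minimal, self-contained user-space C sketch of the error-unwinding shape that the __nf_queue() hunks above adopt: a NULL-initialized pointer plus shared goto labels so every failure path funnels through one cleanup block. The helpers lookup_handler() and acquire_ctx() are hypothetical stand-ins, not kernel APIs, and the RCU locking detail is deliberately omitted.

/* Sketch of the consolidated error path, assuming hypothetical helpers. */
#include <stdio.h>
#include <stdlib.h>

struct ctx { int id; };

/* Pretend handler lookup that can fail, like the queue_handler[pf] check. */
static int lookup_handler(int pf)
{
	return pf == 2;			/* "handler registered" only for pf == 2 */
}

/* Pretend allocation that can fail, like the kmalloc() of the entry. */
static struct ctx *acquire_ctx(int id)
{
	struct ctx *c = malloc(sizeof(*c));
	if (c)
		c->id = id;
	return c;
}

static int queue_packet(int pf)
{
	/* NULL-init so the shared err label can free unconditionally,
	 * mirroring why the patch initializes entry = NULL:
	 * free(NULL), like kfree(NULL), is a no-op. */
	struct ctx *entry = NULL;

	if (!lookup_handler(pf))
		goto err;		/* nothing allocated yet */

	entry = acquire_ctx(pf);
	if (!entry)
		goto err;

	printf("queued on context %d\n", entry->id);
	free(entry);
	return 0;

err:
	free(entry);			/* safe even when entry == NULL */
	return 1;			/* caller treats the packet as consumed */
}

int main(void)
{
	queue_packet(2);		/* handler present: queued, then freed */
	queue_packet(7);		/* no handler: falls through to err */
	return 0;
}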