@@ -112,13 +112,22 @@ static void flow_offload_mangle(struct flow_action_entry *entry,
 	memcpy(&entry->mangle.val, value, sizeof(u32));
 }
 
+static inline struct flow_action_entry *
+flow_action_entry_next(struct nf_flow_rule *flow_rule)
+{
+	int i = flow_rule->rule->action.num_entries++;
+
+	return &flow_rule->rule->action.entries[i];
+}
+
 static int flow_offload_eth_src(struct net *net,
 				const struct flow_offload *flow,
 				enum flow_offload_tuple_dir dir,
-				struct flow_action_entry *entry0,
-				struct flow_action_entry *entry1)
+				struct nf_flow_rule *flow_rule)
 {
 	const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
+	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
 	struct net_device *dev;
 	u32 mask, val;
 	u16 val16;
@@ -145,10 +154,11 @@ static int flow_offload_eth_src(struct net *net,
 static int flow_offload_eth_dst(struct net *net,
 				const struct flow_offload *flow,
 				enum flow_offload_tuple_dir dir,
-				struct flow_action_entry *entry0,
-				struct flow_action_entry *entry1)
+				struct nf_flow_rule *flow_rule)
 {
 	const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
+	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
 	struct neighbour *n;
 	u32 mask, val;
 	u16 val16;
@@ -175,8 +185,9 @@ static int flow_offload_eth_dst(struct net *net,
 static void flow_offload_ipv4_snat(struct net *net,
 				   const struct flow_offload *flow,
 				   enum flow_offload_tuple_dir dir,
-				   struct flow_action_entry *entry)
+				   struct nf_flow_rule *flow_rule)
 {
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffffffff);
 	__be32 addr;
 	u32 offset;
@@ -201,8 +212,9 @@ static void flow_offload_ipv4_snat(struct net *net,
 static void flow_offload_ipv4_dnat(struct net *net,
 				   const struct flow_offload *flow,
 				   enum flow_offload_tuple_dir dir,
-				   struct flow_action_entry *entry)
+				   struct nf_flow_rule *flow_rule)
 {
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffffffff);
 	__be32 addr;
 	u32 offset;
@@ -246,8 +258,9 @@ static int flow_offload_l4proto(const struct flow_offload *flow)
 static void flow_offload_port_snat(struct net *net,
 				   const struct flow_offload *flow,
 				   enum flow_offload_tuple_dir dir,
-				   struct flow_action_entry *entry)
+				   struct nf_flow_rule *flow_rule)
 {
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffff0000);
 	__be16 port;
 	u32 offset;
@@ -272,8 +285,9 @@ static void flow_offload_port_snat(struct net *net,
 static void flow_offload_port_dnat(struct net *net,
 				   const struct flow_offload *flow,
 				   enum flow_offload_tuple_dir dir,
-				   struct flow_action_entry *entry)
+				   struct nf_flow_rule *flow_rule)
 {
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffff);
 	__be16 port;
 	u32 offset;
@@ -297,9 +311,10 @@ static void flow_offload_port_dnat(struct net *net,
 
 static void flow_offload_ipv4_checksum(struct net *net,
 				       const struct flow_offload *flow,
-				       struct flow_action_entry *entry)
+				       struct nf_flow_rule *flow_rule)
 {
 	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 
 	entry->id = FLOW_ACTION_CSUM;
 	entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
@@ -316,8 +331,9 @@ static void flow_offload_ipv4_checksum(struct net *net,
 
 static void flow_offload_redirect(const struct flow_offload *flow,
 				  enum flow_offload_tuple_dir dir,
-				  struct flow_action_entry *entry)
+				  struct nf_flow_rule *flow_rule)
 {
+	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	struct rtable *rt;
 
 	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
@@ -330,39 +346,25 @@ int nf_flow_rule_route(struct net *net, const struct flow_offload *flow,
 			enum flow_offload_tuple_dir dir,
 			struct nf_flow_rule *flow_rule)
 {
-	int i;
-
-	if (flow_offload_eth_src(net, flow, dir,
-				 &flow_rule->rule->action.entries[0],
-				 &flow_rule->rule->action.entries[1]) < 0)
+	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
 		return -1;
 
-	if (flow_offload_eth_dst(net, flow, dir,
-				 &flow_rule->rule->action.entries[2],
-				 &flow_rule->rule->action.entries[3]) < 0)
-		return -1;
-
-	i = 4;
 	if (flow->flags & FLOW_OFFLOAD_SNAT) {
-		flow_offload_ipv4_snat(net, flow, dir,
-				       &flow_rule->rule->action.entries[i++]);
-		flow_offload_port_snat(net, flow, dir,
-				       &flow_rule->rule->action.entries[i++]);
+		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
+		flow_offload_port_snat(net, flow, dir, flow_rule);
 	}
 	if (flow->flags & FLOW_OFFLOAD_DNAT) {
-		flow_offload_ipv4_dnat(net, flow, dir,
-				       &flow_rule->rule->action.entries[i++]);
-		flow_offload_port_dnat(net, flow, dir,
-				       &flow_rule->rule->action.entries[i++]);
+		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
+		flow_offload_port_dnat(net, flow, dir, flow_rule);
 	}
 	if (flow->flags & FLOW_OFFLOAD_SNAT ||
 	    flow->flags & FLOW_OFFLOAD_DNAT)
-		flow_offload_ipv4_checksum(net, flow,
-					   &flow_rule->rule->action.entries[i++]);
+		flow_offload_ipv4_checksum(net, flow, flow_rule);
 
-	flow_offload_redirect(flow, dir, &flow_rule->rule->action.entries[i++]);
+	flow_offload_redirect(flow, dir, flow_rule);
 
-	return i;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_flow_rule_route);
 
@@ -375,7 +377,7 @@ nf_flow_offload_rule_alloc(struct net *net,
 	const struct flow_offload *flow = offload->flow;
 	const struct flow_offload_tuple *tuple;
 	struct nf_flow_rule *flow_rule;
-	int err = -ENOMEM, num_actions;
+	int err = -ENOMEM;
 
 	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
 	if (!flow_rule)
@@ -394,12 +396,10 @@ nf_flow_offload_rule_alloc(struct net *net,
 	if (err < 0)
 		goto err_flow_match;
 
-	num_actions = flowtable->type->action(net, flow, dir, flow_rule);
-	if (num_actions < 0)
+	flow_rule->rule->action.num_entries = 0;
+	if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
 		goto err_flow_match;
 
-	flow_rule->rule->action.num_entries = num_actions;
-
 	return flow_rule;
 
 err_flow_match:
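
For readers skimming the diff: the refactor replaces hard-coded slot indexes (entries[0]..entries[3] plus a hand-carried counter i) with a single cursor owned by the rule. flow_action_entry_next() post-increments action.num_entries and returns the corresponding slot, each action builder fetches its own entries, and nf_flow_offload_rule_alloc() zeroes the counter before invoking the action callback, which can therefore return 0/-1 instead of the number of actions. Below is a minimal user-space sketch of the same append-cursor pattern, assuming a fixed-size preallocated array; all demo_* names are hypothetical illustration, not kernel API.

/*
 * Standalone sketch (not kernel code) of the append-cursor pattern
 * this patch introduces: builders grab the next free slot from a
 * preallocated entry array via a post-incremented counter, so callers
 * never hard-code indexes. All demo_* names are hypothetical.
 */
#include <stdio.h>

#define DEMO_MAX_ENTRIES 8

struct demo_entry {
	int id;
};

struct demo_rule {
	int num_entries;	/* cursor, starts at 0 */
	struct demo_entry entries[DEMO_MAX_ENTRIES];
};

/* Mirrors flow_action_entry_next(): bump the count and hand back
 * the slot it previously pointed at, in one step. */
static struct demo_entry *demo_entry_next(struct demo_rule *rule)
{
	int i = rule->num_entries++;

	return &rule->entries[i];
}

/* Each builder allocates its own slot instead of receiving one. */
static void demo_add_action(struct demo_rule *rule, int id)
{
	struct demo_entry *entry = demo_entry_next(rule);

	entry->id = id;
}

int main(void)
{
	struct demo_rule rule = { .num_entries = 0 };

	demo_add_action(&rule, 1);	/* e.g. mangle */
	demo_add_action(&rule, 2);	/* e.g. csum */
	demo_add_action(&rule, 3);	/* e.g. redirect */

	printf("built %d entries\n", rule.num_entries);
	return 0;
}

One consequence of this design, visible in the nf_flow_rule_route() hunk, is that optional actions (SNAT/DNAT, checksum) can be appended conditionally without any index bookkeeping at the call sites.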