@@ -170,11 +170,10 @@ static void esp_output_restore_header(struct sk_buff *skb)
 }

 static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+					     struct xfrm_state *x,
 					     struct ip_esp_hdr *esph,
 					     __be32 *seqhi)
 {
-	struct xfrm_state *x = skb_dst(skb)->xfrm;
-
 	/* For ESN we move the header forward by 4 bytes to
 	 * accomodate the high bits.  We will move it back after
 	 * encryption.
@@ -214,59 +213,15 @@ static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
 	tail[plen - 1] = proto;
 }

-static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
-	int err;
-	struct ip_esp_hdr *esph;
-	struct crypto_aead *aead;
-	struct aead_request *req;
-	struct scatterlist *sg, *dsg;
-	struct sk_buff *trailer;
-	struct page *page;
-	void *tmp;
-	int blksize;
-	int clen;
-	int alen;
-	int plen;
-	int ivlen;
-	int tfclen;
-	int nfrags;
-	int assoclen;
-	int seqhilen;
-	int tailen;
-	u8 *iv;
 	u8 *tail;
 	u8 *vaddr;
-	__be32 *seqhi;
-	__be64 seqno;
-	__u8 proto = *skb_mac_header(skb);
-
-	/* skb is pure payload to encrypt */
-	aead = x->data;
-	alen = crypto_aead_authsize(aead);
-	ivlen = crypto_aead_ivsize(aead);
-
-	tfclen = 0;
-	if (x->tfcpad) {
-		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
-		u32 padto;
-
-		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
-		if (skb->len < padto)
-			tfclen = padto - skb->len;
-	}
-	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	plen = clen - skb->len - tfclen;
-	tailen = tfclen + plen + alen;
-
-	assoclen = sizeof(*esph);
-	seqhilen = 0;
-
-	if (x->props.flags & XFRM_STATE_ESN) {
-		seqhilen += sizeof(__be32);
-		assoclen += seqhilen;
-	}
+	int nfrags;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct sk_buff *trailer;
+	int tailen = esp->tailen;

 	*skb_mac_header(skb) = IPPROTO_ESP;
 	esph = ip_esp_hdr(skb);
@@ -284,6 +239,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			struct sock *sk = skb->sk;
 			struct page_frag *pfrag = &x->xfrag;

+			esp->inplace = false;
+
 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

 			spin_lock_bh(&x->lock);
@@ -300,10 +257,12 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)

 			tail = vaddr + pfrag->offset;

-			esp_output_fill_trailer(tail, tfclen, plen, proto);
+			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

 			kunmap_atomic(vaddr);

+			spin_unlock_bh(&x->lock);
+
 			nfrags = skb_shinfo(skb)->nr_frags;

 			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -319,77 +278,56 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			if (sk)
 				atomic_add(tailen, &sk->sk_wmem_alloc);

-			skb_push(skb, -skb_network_offset(skb));
-
-			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-			esph->spi = x->id.spi;
-
-			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
-			if (!tmp) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-			seqhi = esp_tmp_seqhi(tmp);
-			iv = esp_tmp_iv(aead, tmp, seqhilen);
-			req = esp_tmp_req(aead, iv);
-			sg = esp_req_sg(aead, req);
-			dsg = &sg[nfrags];
-
-			esph = esp_output_set_esn(skb, esph, seqhi);
-
-			sg_init_table(sg, nfrags);
-			skb_to_sgvec(skb, sg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
-
-			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			skb_shinfo(skb)->nr_frags = 1;
-
-			page = pfrag->page;
-			get_page(page);
-			/* replace page frags in skb with new page */
-			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
-			pfrag->offset = pfrag->offset + allocsize;
-
-			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-			skb_to_sgvec(skb, dsg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			spin_unlock_bh(&x->lock);
-
-			goto skip_cow2;
+			goto out;
 		}
 	}

 cow:
-	err = skb_cow_data(skb, tailen, &trailer);
-	if (err < 0)
-		goto error;
-	nfrags = err;
-
+	nfrags = skb_cow_data(skb, tailen, &trailer);
+	if (nfrags < 0)
+		goto out;
 	tail = skb_tail_pointer(trailer);
-	esph = ip_esp_hdr(skb);

 skip_cow:
-	esp_output_fill_trailer(tail, tfclen, plen, proto);
+	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+	pskb_put(skb, trailer, tailen);

-	pskb_put(skb, trailer, clen - skb->len + alen);
-	skb_push(skb, -skb_network_offset(skb));
+out:
+	return nfrags;
+}
+EXPORT_SYMBOL_GPL(esp6_output_head);

-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-	esph->spi = x->id.spi;
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *iv;
+	int alen;
+	void *tmp;
+	int ivlen;
+	int assoclen;
+	int seqhilen;
+	__be32 *seqhi;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+	struct scatterlist *sg, *dsg;
+	int err = -ENOMEM;

-	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+	assoclen = sizeof(struct ip_esp_hdr);
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		seqhilen += sizeof(__be32);
+		assoclen += sizeof(__be32);
+	}
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+	ivlen = crypto_aead_ivsize(aead);
+
+	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
 	if (!tmp) {
+		spin_unlock_bh(&x->lock);
 		err = -ENOMEM;
 		goto error;
 	}
@@ -398,29 +336,57 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
-	dsg = sg;

-	esph = esp_output_set_esn(skb, esph, seqhi);
+	if (esp->inplace)
+		dsg = sg;
+	else
+		dsg = &sg[esp->nfrags];

-	sg_init_table(sg, nfrags);
+	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
+
+	sg_init_table(sg, esp->nfrags);
 	skb_to_sgvec(skb, sg,
 		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+		     assoclen + ivlen + esp->clen + alen);
+
+	if (!esp->inplace) {
+		int allocsize;
+		struct page_frag *pfrag = &x->xfrag;
+
+		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+		spin_lock_bh(&x->lock);
+		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+			spin_unlock_bh(&x->lock);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		skb_shinfo(skb)->nr_frags = 1;
+
+		page = pfrag->page;
+		get_page(page);
+		/* replace page frags in skb with new page */
+		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+		pfrag->offset = pfrag->offset + allocsize;
+		spin_unlock_bh(&x->lock);
+
+		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+		skb_to_sgvec(skb, dsg,
+			     (unsigned char *)esph - skb->data,
+			     assoclen + ivlen + esp->clen + alen);
+	}

-skip_cow2:
 	if ((x->props.flags & XFRM_STATE_ESN))
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	else
 		aead_request_set_callback(req, 0, esp_output_done, skb);

-	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
+	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 	aead_request_set_ad(req, assoclen);

-	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
-			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
-
 	memset(iv, 0, ivlen);
-	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
+	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 	       min(ivlen, 8));

 	ESP_SKB_CB(skb)->tmp = tmp;
@@ -446,8 +412,57 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 error:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_output_tail);
+
+static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+	int alen;
+	int blksize;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+
+	esp.inplace = true;
+
+	esp.proto = *skb_mac_header(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			esp.tfclen = padto - skb->len;
+	}
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.nfrags = esp6_output_head(x, skb, &esp);
+	if (esp.nfrags < 0)
+		return esp.nfrags;
+
+	esph = ip_esp_hdr(skb);
+	esph->spi = x->id.spi;
+
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	return esp6_output_tail(x, skb, &esp);
+}

-static int esp6_input_done2(struct sk_buff *skb, int err)
+int esp6_input_done2(struct sk_buff *skb, int err)
 {
 	struct xfrm_state *x = xfrm_input_state(skb);
 	struct xfrm_offload *xo = xfrm_offload(skb);
@@ -494,6 +509,7 @@ static int esp6_input_done2(struct sk_buff *skb, int err)
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_input_done2);

 static void esp_input_done(struct crypto_async_request *base, int err)
 {
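
Note on the new interface: the two exported helpers communicate through struct esp_info, whose definition is not part of this diff (it lives in a shared ESP header). As a non-authoritative sketch, the fields referenced above suggest roughly the following layout; the real definition may carry additional members.

/* Sketch only: members inferred from the fields this diff reads and writes. */
struct esp_info {
	__be64	seqno;		/* full 64-bit output sequence number, copied into the IV */
	int	tfclen;		/* TFC padding length */
	int	tailen;		/* tfclen + plen + alen, appended by esp6_output_head() */
	int	plen;		/* ESP self-describing pad length */
	int	clen;		/* padded payload length */
	int	nfrags;		/* value returned by esp6_output_head(), or a negative errno */
	__u8	proto;		/* next-header value saved from the mac header */
	bool	inplace;	/* cleared when the trailer was built in a separate page frag */
};

As the rewritten esp6_output() above shows, a caller fills tfclen/clen/plen/tailen, lets esp6_output_head() append the ESP trailer (a negative nfrags signals failure), stores spi, seq_no and the 64-bit seqno, and then hands the skb to esp6_output_tail(), which sets up the scatterlists and starts the AEAD operation.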