 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
 	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
+#define NFP_FLOWER_MERGE_FIELDS \
+	(NFP_FLOWER_LAYER_PORT | \
+	 NFP_FLOWER_LAYER_MAC | \
+	 NFP_FLOWER_LAYER_TP | \
+	 NFP_FLOWER_LAYER_IPV4 | \
+	 NFP_FLOWER_LAYER_IPV6)
+
+struct nfp_flower_merge_check {
+	union {
+		struct {
+			__be16 tci;
+			struct nfp_flower_mac_mpls l2;
+			struct nfp_flower_tp_ports l4;
+			union {
+				struct nfp_flower_ipv4 ipv4;
+				struct nfp_flower_ipv6 ipv6;
+			};
+		};
+		unsigned long vals[8];
+	};
+};
+
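The structure above overlays the named match fields (TCI, MAC, L4 ports, IPv4/IPv6) with a flat unsigned long vals[8] array, so the same bytes can later be handed to the bitmap helpers as one bitmap. A minimal standalone sketch of that aliasing trick, using made-up toy fields rather than the driver's match structures:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for nfp_flower_merge_check: named fields and a word array
 * share the same storage, so field-level writes are visible as raw bits.
 */
struct toy_merge_check {
	union {
		struct {
			unsigned short tci;
			unsigned char mac_dst[6];
		};
		unsigned long vals[2];	/* sized to cover the fields above */
	};
};

int main(void)
{
	struct toy_merge_check m;

	memset(&m, 0, sizeof(m));
	m.tci = 0xffff;				/* pretend the TCI is fully matched */
	printf("vals[0] = %#lx\n", m.vals[0]);	/* the TCI bits show up in the word */
	return 0;
}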
 static int
 nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
 		     u8 mtype)
@@ -388,6 +410,206 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 	return NULL;
 }
 
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+				     struct nfp_flower_merge_check *merge,
+				     u8 *last_act_id, int *act_out)
+{
+	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+	struct nfp_fl_set_ip4_addrs *ipv4_add;
+	struct nfp_fl_set_ipv6_addr *ipv6_add;
+	struct nfp_fl_push_vlan *push_vlan;
+	struct nfp_fl_set_tport *tport;
+	struct nfp_fl_set_eth *eth;
+	struct nfp_fl_act_head *a;
+	unsigned int act_off = 0;
+	u8 act_id = 0;
+	u8 *ports;
+	int i;
+
+	while (act_off < flow->meta.act_len) {
+		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+		act_id = a->jump_id;
+
+		switch (act_id) {
+		case NFP_FL_ACTION_OPCODE_OUTPUT:
+			if (act_out)
+				(*act_out)++;
+			break;
+		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+			push_vlan = (struct nfp_fl_push_vlan *)a;
+			if (push_vlan->vlan_tci)
+				merge->tci = cpu_to_be16(0xffff);
+			break;
+		case NFP_FL_ACTION_OPCODE_POP_VLAN:
+			merge->tci = cpu_to_be16(0);
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+			/* New tunnel header means l2 to l4 can be matched. */
+			eth_broadcast_addr(&merge->l2.mac_dst[0]);
+			eth_broadcast_addr(&merge->l2.mac_src[0]);
+			memset(&merge->l4, 0xff,
+			       sizeof(struct nfp_flower_tp_ports));
+			memset(&merge->ipv4, 0xff,
+			       sizeof(struct nfp_flower_ipv4));
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+			eth = (struct nfp_fl_set_eth *)a;
+			for (i = 0; i < ETH_ALEN; i++)
+				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+			for (i = 0; i < ETH_ALEN; i++)
+				merge->l2.mac_src[i] |=
+					eth->eth_addr_mask[ETH_ALEN + i];
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+			for (i = 0; i < 4; i++)
+				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+					ipv6_add->ipv6[i].mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+			for (i = 0; i < 4; i++)
+				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+					ipv6_add->ipv6[i].mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+			merge->ipv6.ip_ext.ttl |=
+				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+			merge->ipv6.ipv6_flow_label_exthdr |=
+				ipv6_tc_hl_fl->ipv6_label_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_UDP:
+		case NFP_FL_ACTION_OPCODE_SET_TCP:
+			tport = (struct nfp_fl_set_tport *)a;
+			ports = (u8 *)&merge->l4.port_src;
+			for (i = 0; i < 4; i++)
+				ports[i] |= tport->tp_port_mask[i];
+			break;
+		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+		case NFP_FL_ACTION_OPCODE_PRE_LAG:
+		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		act_off += a->len_lw << NFP_FL_LW_SIZ;
+	}
+
+	if (last_act_id)
+		*last_act_id = act_id;
+
+	return 0;
+}
+
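The loop above walks the flow's packed action list: each action starts with an nfp_fl_act_head carrying an opcode (jump_id) and a length (len_lw) that is shifted by NFP_FL_LW_SIZ to reach the next action. A standalone sketch of that walk, with made-up opcodes and a 4-byte length unit standing in for the real shift (my assumptions, not driver constants):

#include <stdint.h>
#include <stdio.h>

#define TOY_LW_SHIFT	2	/* assume lengths are counted in 4-byte words */

struct toy_act_head {
	uint8_t jump_id;	/* opcode */
	uint8_t len_lw;		/* total action length, in 4-byte words */
	uint8_t pad[2];
};

static void walk_actions(const uint8_t *buf, unsigned int len)
{
	unsigned int off = 0;

	while (off < len) {
		const struct toy_act_head *a =
			(const struct toy_act_head *)&buf[off];

		printf("opcode %u, %u bytes\n",
		       a->jump_id, a->len_lw << TOY_LW_SHIFT);
		off += a->len_lw << TOY_LW_SHIFT;	/* step to next action */
	}
}

int main(void)
{
	/* Two toy actions: opcode 1 (8 bytes) then opcode 2 (4 bytes). */
	uint8_t buf[12] = { 1, 2, 0, 0, 0, 0, 0, 0,  2, 1, 0, 0 };

	walk_actions(buf, sizeof(buf));
	return 0;
}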
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+				struct nfp_flower_merge_check *merge,
+				bool extra_fields)
+{
+	struct nfp_flower_meta_tci *meta_tci;
+	u8 *mask = flow->mask_data;
+	u8 key_layer, match_size;
+
+	memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+	meta_tci = (struct nfp_flower_meta_tci *)mask;
+	key_layer = meta_tci->nfp_flow_key_layer;
+
+	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+		return -EOPNOTSUPP;
+
+	merge->tci = meta_tci->tci;
+	mask += sizeof(struct nfp_flower_meta_tci);
+
+	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+		mask += sizeof(struct nfp_flower_ext_meta);
+
+	mask += sizeof(struct nfp_flower_in_port);
+
+	if (key_layer & NFP_FLOWER_LAYER_MAC) {
+		match_size = sizeof(struct nfp_flower_mac_mpls);
+		memcpy(&merge->l2, mask, match_size);
+		mask += match_size;
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_TP) {
+		match_size = sizeof(struct nfp_flower_tp_ports);
+		memcpy(&merge->l4, mask, match_size);
+		mask += match_size;
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+		match_size = sizeof(struct nfp_flower_ipv4);
+		memcpy(&merge->ipv4, mask, match_size);
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+		match_size = sizeof(struct nfp_flower_ipv6);
+		memcpy(&merge->ipv6, mask, match_size);
+	}
+
+	return 0;
+}
+
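nfp_flower_populate_merge_match() copies mask data out of a packed buffer whose layout is dictated by the key-layer bits: each layer that is present occupies the next fixed-size chunk. A sketch of the same layer-by-layer walk with toy layer flags and sizes (the real sizes come from the nfp flower match structures):

#include <stdint.h>
#include <string.h>

#define TOY_LAYER_MAC	0x1
#define TOY_LAYER_TP	0x2
#define TOY_LAYER_IPV4	0x4

struct toy_merge {
	uint8_t l2[14];		/* stand-in for nfp_flower_mac_mpls */
	uint8_t l4[8];		/* stand-in for nfp_flower_tp_ports */
	uint8_t ipv4[16];	/* stand-in for nfp_flower_ipv4 */
};

static void populate(const uint8_t *mask, uint8_t key_layer,
		     struct toy_merge *merge)
{
	memset(merge, 0, sizeof(*merge));

	/* Each present layer occupies the next chunk of the mask buffer. */
	if (key_layer & TOY_LAYER_MAC) {
		memcpy(merge->l2, mask, sizeof(merge->l2));
		mask += sizeof(merge->l2);
	}
	if (key_layer & TOY_LAYER_TP) {
		memcpy(merge->l4, mask, sizeof(merge->l4));
		mask += sizeof(merge->l4);
	}
	if (key_layer & TOY_LAYER_IPV4)
		memcpy(merge->ipv4, mask, sizeof(merge->ipv4));
}

int main(void)
{
	uint8_t mask[38] = { 0 };	/* MAC + TP + IPv4 chunks, all zero */
	struct toy_merge merge;

	populate(mask, TOY_LAYER_MAC | TOY_LAYER_TP | TOY_LAYER_IPV4, &merge);
	return 0;
}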
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+		     struct nfp_fl_payload *sub_flow2)
+{
+	/* Two flows can be merged if sub_flow2 only matches on bits that are
+	 * either matched by sub_flow1 or set by a sub_flow1 action. This
+	 * ensures that every packet that hits sub_flow1 and recirculates is
+	 * guaranteed to hit sub_flow2.
+	 */
+	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+	int err, act_out = 0;
+	u8 last_act_id = 0;
+
+	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+					      true);
+	if (err)
+		return err;
+
+	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+					      false);
+	if (err)
+		return err;
+
+	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+						   &last_act_id, &act_out);
+	if (err)
+		return err;
+
+	/* Must only be 1 output action and it must be the last in sequence. */
+	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+		return -EOPNOTSUPP;
+
+	/* Reject merge if sub_flow2 matches on something that is not matched
+	 * on or set in an action by sub_flow1.
+	 */
+	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+			    sub_flow1_merge.vals,
+			    sizeof(struct nfp_flower_merge_check) * 8);
+	if (err)
+		return -EINVAL;
+
+	return 0;
+}
+
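nfp_flower_can_merge() ends with a superset test: after OR-ing in everything sub_flow1 matches or sets, bitmap_andnot() clears those bits from sub_flow2's match bitmap and reports whether anything is left, in which case the merge is rejected. A userspace sketch of that check on the flat vals words (toy values and a fixed word count, not the kernel helper):

#include <stdbool.h>
#include <stdio.h>

#define TOY_WORDS	8

/* Return true when every bit set in flow2 is also covered by flow1. */
static bool flow2_is_covered(const unsigned long *flow1,
			     const unsigned long *flow2)
{
	int i;

	for (i = 0; i < TOY_WORDS; i++)
		if (flow2[i] & ~flow1[i])	/* bit set in flow2 only */
			return false;
	return true;
}

int main(void)
{
	unsigned long flow1[TOY_WORDS] = { 0xff00 };
	unsigned long flow2[TOY_WORDS] = { 0x0f00 };

	printf("mergeable: %d\n", flow2_is_covered(flow1, flow2));	/* 1 */
	flow2[0] = 0xf000f;	/* now matches bits flow1 never touches */
	printf("mergeable: %d\n", flow2_is_covered(flow1, flow2));	/* 0 */
	return 0;
}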
 /**
  * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
  * @app:	Pointer to the APP handle
@@ -403,6 +625,12 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 				     struct nfp_fl_payload *sub_flow1,
 				     struct nfp_fl_payload *sub_flow2)
 {
+	int err;
+
+	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+	if (err)
+		return err;
+
 	return -EOPNOTSUPP;
 }