@@ -585,27 +585,148 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 					__le32 ref_decap_handle,
 					__le32 *decap_filter_handle)
 {
-	return 0;
+	struct hwrm_cfa_decap_filter_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+	struct ip_tunnel_key *tun_key = &flow->tun_key;
+	u32 enables = 0;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
+
+	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
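+	/* Per the HWRM convention, the firmware honors an optional request
+	 * field only if its corresponding ENABLES bit is set; the bits are
+	 * accumulated in 'enables' and written into the request below.
+	 */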
+	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
+		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
+	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
+
+	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
+		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
+		/* tunnel_id is wrongly defined in the HSI as __le32 */
+		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
+	}
+
+	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
+		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
+			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
+		ether_addr_copy(req.src_macaddr, l2_info->smac);
+	}
+	if (l2_info->num_vlans) {
+		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
+		req.t_ivlan_vid = l2_info->inner_vlan_tci;
+	}
+
+	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
+	req.ethertype = htons(ETH_P_IP);
+
+	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
+		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
+			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
+			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
+		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
+		req.src_ipaddr[0] = tun_key->u.ipv4.src;
+	}
+
+	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
+		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
+		req.dst_port = tun_key->tp_dst;
+	}
+
+	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
+	 * is defined as __le32, l2_ctxt_ref_id is defined in the HSI as __le16.
+	 */
+	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
+	req.enables = cpu_to_le32(enables);
+
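+	/* _hwrm_send_message() is the non-locking variant: hwrm_cmd_lock is
+	 * taken here so the response in hwrm_cmd_resp_addr can be read
+	 * before another HWRM command reuses the buffer.
+	 */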
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		*decap_filter_handle = resp->decap_filter_id;
+	else
+		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
 }
 
 static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
 {
-	return 0;
+	struct hwrm_cfa_decap_filter_free_input req = { 0 };
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
+	req.decap_filter_id = decap_filter_handle;
+
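+	/* No response payload is read here, so the hwrm_send_message()
+	 * variant, which takes hwrm_cmd_lock internally, is sufficient.
+	 */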
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	return rc;
 }
 
 static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
 {
-	return 0;
+	struct hwrm_cfa_encap_record_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+	struct hwrm_cfa_encap_data_vxlan *encap =
+			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
+	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
+				(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
+
+	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
+
+	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
+	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
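+	/* A single VLAN tag from the tunnel L2 key is programmed into the
+	 * outer-VLAN fields of the encap record.
+	 */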
+	if (l2_info->num_vlans) {
+		encap->num_vlan_tags = l2_info->num_vlans;
+		encap->ovlan_tci = l2_info->inner_vlan_tci;
+		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
+	}
+
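+	/* Outer IPv4 header: version 4, header length 5 dwords (20 bytes,
+	 * i.e. no IP options).
+	 */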
+	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
+	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
+	encap_ipv4->ttl = encap_key->ttl;
+
+	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
+	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
+	encap_ipv4->protocol = IPPROTO_UDP;
+
+	encap->dst_port = encap_key->tp_dst;
+	encap->vni = tunnel_id_to_key32(encap_key->tun_id);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		*encap_record_handle = resp->encap_record_id;
+	else
+		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
 }
 
 static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
 {
-	return 0;
+	struct hwrm_cfa_encap_record_free_input req = { 0 };
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
+	req.encap_record_id = encap_record_handle;
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	return rc;
 }
 
 static int bnxt_tc_put_l2_node(struct bnxt *bp,