#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
@@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev)
	return 0;
}

+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths to fill in
+ * @hdr_data - buffer to hold the copied headers
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers.  Saves individual header
+ * lengths; returns the total buffer length used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+			  int *hdr_len, u8 *hdr_data)
+{
+	int len = 0;
+	u8 *hdr;
+
+	hdr_len[0] = sizeof(struct ethhdr);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		hdr_len[1] = ip_hdr(skb)->ihl * 4;
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		hdr_len[1] = sizeof(struct ipv6hdr);
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	}
+
+	memset(hdr_data, 0, 120);
+	if ((hdr_field >> 6) & 1) {
+		hdr = skb_mac_header(skb);
+		memcpy(hdr_data, hdr, hdr_len[0]);
+		len += hdr_len[0];
+	}
+
+	if ((hdr_field >> 5) & 1) {
+		hdr = skb_network_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[1]);
+		len += hdr_len[1];
+	}
+
+	if ((hdr_field >> 4) & 1) {
+		hdr = skb_transport_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[2]);
+		len += hdr_len[2];
+	}
+	return len;
+}
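
To make the bitfield and length bookkeeping concrete, here is an illustrative sketch (mine, not part of the patch) of what build_hdr_data() produces for a minimal TCP/IPv4 frame, assuming no IP or TCP options; bits 6, 5 and 4 of hdr_field select the L2, L3 and L4 headers respectively:

	/* Illustrative values only, assuming a minimal TCP/IPv4 skb */
	int hdr_len[3];
	hdr_len[0] = 14;	/* sizeof(struct ethhdr) */
	hdr_len[1] = 5 * 4;	/* ip_hdr(skb)->ihl == 5 -> 20 bytes */
	hdr_len[2] = 20;	/* tcp_hdrlen() with no TCP options */
	/* with bits 6, 5 and 4 of hdr_field set, all three headers are
	 * copied into hdr_data and the function returns 14 + 20 + 20 = 54
	 */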
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @hdr_data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in the descriptor array, scrq_arr.
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+			     union sub_crq *scrq_arr)
+{
+	union sub_crq hdr_desc;
+	int tmp_len = len;
+	u8 *data, *cur;
+	int tmp;
+
+	while (tmp_len > 0) {
+		cur = hdr_data + len - tmp_len;
+
+		memset(&hdr_desc, 0, sizeof(hdr_desc));
+		if (cur != hdr_data) {
+			data = hdr_desc.hdr_ext.data;
+			tmp = tmp_len > 29 ? 29 : tmp_len;
+			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+			hdr_desc.hdr_ext.len = tmp;
+		} else {
+			data = hdr_desc.hdr.data;
+			tmp = tmp_len > 24 ? 24 : tmp_len;
+			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+			hdr_desc.hdr.len = tmp;
+			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+			hdr_desc.hdr.flag = hdr_field << 1;
+		}
+		memcpy(data, cur, tmp);
+		tmp_len -= tmp;
+		*scrq_arr = hdr_desc;
+		scrq_arr++;
+	}
+}
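
The 24/29 split comes from the descriptor formats: the first header descriptor carries at most 24 bytes of data alongside the L2/L3/L4 length fields, while each extension descriptor carries up to 29 bytes. A standalone userspace sketch (illustrative, not driver code) of how the loop chunks the 54-byte example from above:

	#include <stdio.h>

	int main(void)
	{
		int tmp_len = 54, descs = 0;	/* 54 = 14 + 20 + 20 */

		while (tmp_len > 0) {
			/* first descriptor takes 24 bytes, extensions 29 */
			int tmp = descs ? (tmp_len > 29 ? 29 : tmp_len)
					: (tmp_len > 24 ? 24 : tmp_len);
			printf("desc %d carries %d bytes\n", descs++, tmp);
			tmp_len -= tmp;
		}
		return 0;	/* prints 24, 29, 1: three descriptors */
	}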
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff - tx buffer containing the socket buffer and header data
+ * @num_entries - number of descriptors to be sent
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors, placed in txbuff->indir_arr
+ * starting at index 1, to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+				int *num_entries, u8 hdr_field)
+{
+	int hdr_len[3] = {0, 0, 0};
+	int tot_len, len;
+	u8 *hdr_data = txbuff->hdr_data;
+
+	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+				 txbuff->hdr_data);
+	len = tot_len;
+	len -= 24;
+	if (len > 0)
+		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
+	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+			 txbuff->indir_arr + 1);
+}
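
The conditional expression is a ceiling division over the extension descriptors: beyond the 24 bytes in the first header descriptor, every full or partial 29-byte chunk needs one more entry. Equivalently (illustrative, using the names from the function above):

	/* extra entries = ceil((tot_len - 24) / 29), e.g. tot_len = 54 -> 2 */
	int extra = (tot_len > 24) ? (tot_len - 24 + 28) / 29 : 0;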
+
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
+	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
+	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
@@ -644,11 +778,34 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
-	lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+		hdrs += 2;
+	}
+	/* determine if l2/3/4 headers are sent to firmware */
+	if ((*hdrs >> 7) & 1 &&
+	    (skb->protocol == htons(ETH_P_IP) ||
+	     skb->protocol == htons(ETH_P_IPV6))) {
+		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+		tx_crq.v1.n_crq_elem = num_entries;
+		tx_buff->indir_arr[0] = tx_crq;
+		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+						    sizeof(tx_buff->indir_arr),
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+			if (!firmware_has_feature(FW_FEATURE_CMO))
+				dev_err(dev, "tx: unable to map descriptor array\n");
+			tx_map_failed++;
+			tx_dropped++;
+			ret = NETDEV_TX_BUSY;
+			goto out;
+		}
+		lpar_rc = send_subcrq_indirect(adapter, handle_array[0],
+					       (u64)tx_buff->indir_dma,
+					       (u64)num_entries);
+	} else {
+		lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
+	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);
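
For orientation (my annotation, not patch text), the array handed to the hypervisor on this path looks as follows; num_entries counts the TX descriptor itself plus the header descriptors built by build_hdr_descs_arr():

	/*
	 * tx_buff->indir_arr[0]      = tx_crq (v1 descriptor, n_crq_elem set)
	 * tx_buff->indir_arr[1]      = header descriptor (first 24 bytes)
	 * tx_buff->indir_arr[2..n-1] = header extension descriptors
	 *
	 * The whole array is DMA-mapped once and its bus address plus the
	 * entry count are passed to send_subcrq_indirect().
	 */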
@@ -1159,6 +1316,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
	union sub_crq *next;
	int index;
	int i, j;
+	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1339,13 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
+			/* if sub_crq was sent indirectly */
+			first = txbuff->indir_arr[0].generic.first;
+			if (first == IBMVNIC_CRQ_CMD) {
+				dma_unmap_single(dev, txbuff->indir_dma,
+						 sizeof(txbuff->indir_arr),
+						 DMA_TO_DEVICE);
+			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);
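
My reading of this check, hedged: indir_arr[0] receives a copy of tx_crq only on the indirect send path, and that descriptor's first byte is IBMVNIC_CRQ_CMD, so a matching first byte implies an outstanding DMA mapping that must be undone here:

	/* Only the indirect path populates indir_arr[0], so its first
	 * byte doubles as an "indirect mapping outstanding" flag when
	 * the completion is processed.
	 */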
@@ -1494,6 +1659,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
	return rc;
}

+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+				u64 remote_handle, u64 ioba, u64 num_entries)
+{
+	unsigned int ua = adapter->vdev->unit_address;
+	struct device *dev = &adapter->vdev->dev;
+	int rc;
+
+	/* Make sure the hypervisor sees the complete request */
+	mb();
+	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+				cpu_to_be64(remote_handle),
+				ioba, num_entries);
+
+	if (rc) {
+		if (rc == H_CLOSED)
+			dev_warn(dev, "CRQ Queue closed\n");
+		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+	}
+
+	return rc;
+}
+
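
As used from ibmvnic_xmit() above, the four hcall arguments are the adapter's unit address, the big-endian remote queue handle, the I/O bus address of the mapped descriptor array, and the number of entries in that array:

	/* call-site sketch, repeated from the TX path above */
	lpar_rc = send_subcrq_indirect(adapter, handle_array[0],
				       (u64)tx_buff->indir_dma,
				       (u64)num_entries);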
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{