Commit ad7775d

tlfalcon authored and davem330 committed

ibmvnic: map L2/L3/L4 header descriptors to firmware

Allow the VNIC driver to provide descriptors containing L2/L3/L4 headers
to firmware. This feature is needed for greater hardware compatibility
and to enable the checksum and TCP offload features. A new function wraps
the hypervisor call H_SEND_SUB_CRQ_INDIRECT, which lets a DMA-mapped
array of SCRQ descriptor elements be sent to the VNIC server. These
additions will help fully enable checksum offloading, as well as other
features, as they are added later.

Signed-off-by: Thomas Falcon <[email protected]>
Cc: John Allen <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 590ddaa commit ad7775d

File tree

2 files changed: +194 −4 lines
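Throughout the diff below, each byte of adapter->tx_rx_desc_req is read as a
header-request bitfield: bit 7 gates header descriptors entirely, and bits
6..4 request the L2/L3/L4 headers (see build_hdr_data() and the
ibmvnic_xmit() hunk). A minimal sketch of that decoding, with illustrative
macro names; the driver itself open-codes the shifts:

/* Illustrative decode of one tx_rx_desc_req byte; not driver macros. */
#define VNIC_HDRS_REQUESTED(b)	(((b) >> 7) & 1)	/* any headers wanted? */
#define VNIC_HDR_WANT_L2(b)	(((b) >> 6) & 1)	/* Ethernet header */
#define VNIC_HDR_WANT_L3(b)	(((b) >> 5) & 1)	/* IPv4/IPv6 header */
#define VNIC_HDR_WANT_L4(b)	(((b) >> 4) & 1)	/* TCP/UDP header */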

drivers/net/ethernet/ibm/ibmvnic.c

Lines changed: 191 additions & 4 deletions
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/irq.h>
 #include <linux/kthread.h>
 #include <linux/seq_file.h>
@@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 		       union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
 static int enable_scrq_irq(struct ibmvnic_adapter *,
 			   struct ibmvnic_sub_crq_queue *);
@@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev)
 	return 0;
 }
 
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer in which the headers are stored
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers.  Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+			  int *hdr_len, u8 *hdr_data)
+{
+	int len = 0;
+	u8 *hdr;
+
+	hdr_len[0] = sizeof(struct ethhdr);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		hdr_len[1] = ip_hdr(skb)->ihl * 4;
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		hdr_len[1] = sizeof(struct ipv6hdr);
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			hdr_len[2] = tcp_hdrlen(skb);
+		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+			hdr_len[2] = sizeof(struct udphdr);
+	}
+
+	memset(hdr_data, 0, 120);
+	if ((hdr_field >> 6) & 1) {
+		hdr = skb_mac_header(skb);
+		memcpy(hdr_data, hdr, hdr_len[0]);
+		len += hdr_len[0];
+	}
+
+	if ((hdr_field >> 5) & 1) {
+		hdr = skb_network_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[1]);
+		len += hdr_len[1];
+	}
+
+	if ((hdr_field >> 4) & 1) {
+		hdr = skb_transport_header(skb);
+		memcpy(hdr_data + len, hdr, hdr_len[2]);
+		len += hdr_len[2];
+	}
+	return len;
+}
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+			     union sub_crq *scrq_arr)
+{
+	union sub_crq hdr_desc;
+	int tmp_len = len;
+	u8 *data, *cur;
+	int tmp;
+
+	while (tmp_len > 0) {
+		cur = hdr_data + len - tmp_len;
+
+		memset(&hdr_desc, 0, sizeof(hdr_desc));
+		if (cur != hdr_data) {
+			data = hdr_desc.hdr_ext.data;
+			tmp = tmp_len > 29 ? 29 : tmp_len;
+			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+			hdr_desc.hdr_ext.len = tmp;
+		} else {
+			data = hdr_desc.hdr.data;
+			tmp = tmp_len > 24 ? 24 : tmp_len;
+			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+			hdr_desc.hdr.len = tmp;
+			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+			hdr_desc.hdr.flag = hdr_field << 1;
+		}
+		memcpy(data, cur, tmp);
+		tmp_len -= tmp;
+		*scrq_arr = hdr_desc;
+		scrq_arr++;
+	}
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff: TX buffer containing the skb, the header data buffer and
+ *	    the descriptor array (indir_arr) to fill
+ * @num_entries: in/out count of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+				int *num_entries, u8 hdr_field)
+{
+	int hdr_len[3] = {0, 0, 0};
+	int tot_len, len;
+	u8 *hdr_data = txbuff->hdr_data;
+
+	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+				 txbuff->hdr_data);
+	len = tot_len - 24;
+	/* count the header descriptor itself, plus any extension
+	 * descriptors needed for header bytes beyond the first 24 */
+	*num_entries += len > 0 ? 1 + DIV_ROUND_UP(len, 29) : 1;
+	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+			 txbuff->indir_arr + 1);
+}
+
 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int queue_num = skb_get_queue_mapping(skb);
+	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_tx_buff *tx_buff = NULL;
 	struct ibmvnic_tx_pool *tx_pool;
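To make the 24/29 arithmetic above concrete: the first header descriptor
carries at most 24 bytes of header data and each extension descriptor at
most 29. A standalone sketch of the count (DIV_ROUND_UP is from
linux/kernel.h; the function name is ours):

/* Header descriptors needed for tot_len bytes of L2/L3/L4 headers. */
static int hdr_desc_count(int tot_len)
{
	int rest = tot_len - 24;	/* bytes beyond the first descriptor */

	return rest > 0 ? 1 + DIV_ROUND_UP(rest, 29) : 1;
}

For a TCP/IPv4 frame with no options, tot_len = 14 + 20 + 20 = 54 bytes, so
hdr_desc_count() returns 3 (24 + 29 + 1 bytes); with the TX data descriptor
in indir_arr[0], n_crq_elem becomes 4.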
@@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned long lpar_rc;
 	union sub_crq tx_crq;
 	unsigned int offset;
+	int num_entries = 1;
 	unsigned char *dst;
 	u64 *handle_array;
 	int index = 0;
@@ -644,11 +778,34 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
 	}
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
-	lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+		hdrs += 2;
+	}
+	/* determine if l2/3/4 headers are sent to firmware */
+	if ((*hdrs >> 7) & 1 &&
+	    (skb->protocol == htons(ETH_P_IP) ||
+	     skb->protocol == htons(ETH_P_IPV6))) {
+		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+		tx_crq.v1.n_crq_elem = num_entries;
+		tx_buff->indir_arr[0] = tx_crq;
+		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+						    sizeof(tx_buff->indir_arr),
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+			if (!firmware_has_feature(FW_FEATURE_CMO))
+				dev_err(dev, "tx: unable to map descriptor array\n");
+			tx_map_failed++;
+			tx_dropped++;
+			ret = NETDEV_TX_BUSY;
+			goto out;
+		}
+		lpar_rc = send_subcrq_indirect(adapter, handle_array[0],
+					       (u64)tx_buff->indir_dma,
+					       (u64)num_entries);
+	} else {
+		lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
+	}
 	if (lpar_rc != H_SUCCESS) {
 		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

@@ -1159,6 +1316,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 	union sub_crq *next;
 	int index;
 	int i, j;
+	u8 first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1339,13 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 			txbuff->data_dma[j] = 0;
 			txbuff->used_bounce = false;
 		}
+		/* if sub_crq was sent indirectly */
+		first = txbuff->indir_arr[0].generic.first;
+		if (first == IBMVNIC_CRQ_CMD) {
+			dma_unmap_single(dev, txbuff->indir_dma,
+					 sizeof(txbuff->indir_arr),
+					 DMA_TO_DEVICE);
+		}
 
 		if (txbuff->last_frag)
 			dev_kfree_skb_any(txbuff->skb);
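The completion handler decides whether a dma_unmap_single() is owed by
inspecting the command byte of the first staged descriptor: indir_arr[0] is
populated only on the indirect path, so IBMVNIC_CRQ_CMD there doubles as a
"sent indirectly" flag. The same test as a helper (ours, not the driver's):

/* True when ibmvnic_xmit() staged an indirect send for this buffer. */
static bool tx_buff_sent_indirect(const struct ibmvnic_tx_buff *txbuff)
{
	return txbuff->indir_arr[0].generic.first == IBMVNIC_CRQ_CMD;
}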
@@ -1494,6 +1659,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 	return rc;
 }
 
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+				u64 remote_handle, u64 ioba, u64 num_entries)
+{
+	unsigned int ua = adapter->vdev->unit_address;
+	struct device *dev = &adapter->vdev->dev;
+	int rc;
+
+	/* Make sure the hypervisor sees the complete request */
+	mb();
+	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+				cpu_to_be64(remote_handle),
+				ioba, num_entries);
+
+	if (rc) {
+		if (rc == H_CLOSED)
+			dev_warn(dev, "CRQ Queue closed\n");
+		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+	}
+
+	return rc;
+}
+
 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
 			    union ibmvnic_crq *crq)
 {
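One caveat the commit leaves to callers: if the hypercall itself fails, no
TX completion will arrive for those elements, so the indirect mapping would
never be unmapped by ibmvnic_complete_tx(). A hypothetical wrapper showing
that cleanup responsibility; none of this is in the commit:

/* Hypothetical: send the staged array, unmapping immediately on
 * failure since no completion (and thus no deferred unmap) follows.
 */
static int send_indirect_or_unmap(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_tx_buff *tx_buff,
				  u64 handle, u64 num_entries)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	rc = send_subcrq_indirect(adapter, handle,
				  (u64)tx_buff->indir_dma, num_entries);
	if (rc != H_SUCCESS)
		dma_unmap_single(dev, tx_buff->indir_dma,
				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
	return rc;
}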

drivers/net/ethernet/ibm/ibmvnic.h

Lines changed: 3 additions & 0 deletions
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
 	int pool_index;
 	bool last_frag;
 	bool used_bounce;
+	union sub_crq indir_arr[6];
+	u8 hdr_data[140];
+	dma_addr_t indir_dma;
 };
 
 struct ibmvnic_tx_pool {
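The two sizes added here fit the descriptor arithmetic in the .c hunks:
140 bytes covers the worst-case header block (14-byte Ethernet + 60-byte
IPv4 + 60-byte TCP = 134), and six array slots cover one TX descriptor, one
header descriptor (24 bytes) and up to four extension descriptors (29 bytes
each). A compile-time restatement of that inference (our arithmetic, not
the commit's):

#define VNIC_HDR_MAX	140	/* sizeof(hdr_data) */
#define VNIC_HDR_FIRST	24	/* payload of the header descriptor */
#define VNIC_HDR_EXT	29	/* payload of each extension descriptor */

/* 1 TX desc + 1 header desc + ceil((140 - 24) / 29) ext descs == 6 */
_Static_assert(2 + (VNIC_HDR_MAX - VNIC_HDR_FIRST + VNIC_HDR_EXT - 1) /
		   VNIC_HDR_EXT == 6,
	       "indir_arr[6] covers the worst-case header block");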
