
Commit fe57762

johndale88 authored and kuba-moo committed

enic: Move RX functions to their own file

Move RX handler code into its own file in preparation for further changes.
Some formatting changes were necessary in order to satisfy checkpatch but
there were no functional changes.

Co-developed-by: Nelson Escobar <[email protected]>
Signed-off-by: Nelson Escobar <[email protected]>
Co-developed-by: Satish Kharat <[email protected]>
Signed-off-by: Satish Kharat <[email protected]>
Signed-off-by: John Daley <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>

1 parent 6597e8d, commit fe57762

File tree

4 files changed (+254, -238 lines)


drivers/net/ethernet/cisco/enic/Makefile (1 addition, 1 deletion)

@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
 	enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-	enic_ethtool.o enic_api.o enic_clsf.o
+	enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o
 

drivers/net/ethernet/cisco/enic/enic_main.c (1 addition, 237 deletions)

@@ -58,6 +58,7 @@
 #include "enic_dev.h"
 #include "enic_pp.h"
 #include "enic_clsf.h"
+#include "enic_rq.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
@@ -1313,243 +1314,6 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
 	return -EMSGSIZE;
 }
 
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-
-	if (!buf->os_buf)
-		return;
-
-	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-			 DMA_FROM_DEVICE);
-	dev_kfree_skb_any(buf->os_buf);
-	buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	struct net_device *netdev = enic->netdev;
-	struct sk_buff *skb;
-	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
-	unsigned int os_buf_index = 0;
-	dma_addr_t dma_addr;
-	struct vnic_rq_buf *buf = rq->to_use;
-
-	if (buf->os_buf) {
-		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
-				   buf->len);
-
-		return 0;
-	}
-	skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!skb) {
-		enic->rq[rq->index].stats.no_skb++;
-		return -ENOMEM;
-	}
-
-	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
-				  DMA_FROM_DEVICE);
-	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
-		dev_kfree_skb(skb);
-		return -ENOMEM;
-	}
-
-	enic_queue_rq_desc(rq, skb, os_buf_index,
-			   dma_addr, len);
-
-	return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
-				      u32 pkt_len)
-{
-	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
-		pkt_size->large_pkt_bytes_cnt += pkt_len;
-	else
-		pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
-			     struct vnic_rq_buf *buf, u16 len)
-{
-	struct enic *enic = netdev_priv(netdev);
-	struct sk_buff *new_skb;
-
-	if (len > enic->rx_copybreak)
-		return false;
-	new_skb = netdev_alloc_skb_ip_align(netdev, len);
-	if (!new_skb)
-		return false;
-	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
-				DMA_FROM_DEVICE);
-	memcpy(new_skb->data, (*skb)->data, len);
-	*skb = new_skb;
-
-	return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-	int skipped, void *opaque)
-{
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	struct net_device *netdev = enic->netdev;
-	struct sk_buff *skb;
-	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-	struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
-	u8 type, color, eop, sop, ingress_port, vlan_stripped;
-	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-	u8 packet_error;
-	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
-	u32 rss_hash;
-	bool outer_csum_ok = true, encap = false;
-
-	rqstats->packets++;
-	if (skipped) {
-		rqstats->desc_skip++;
-		return;
-	}
-
-	skb = buf->os_buf;
-
-	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
-			    &type, &color, &q_number, &completed_index,
-			    &ingress_port, &fcoe, &eop, &sop, &rss_type,
-			    &csum_not_calc, &rss_hash, &bytes_written,
-			    &packet_error, &vlan_stripped, &vlan_tci, &checksum,
-			    &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-			    &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-			    &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-			    &fcs_ok);
-
-	if (packet_error) {
-
-		if (!fcs_ok) {
-			if (bytes_written > 0)
-				rqstats->bad_fcs++;
-			else if (bytes_written == 0)
-				rqstats->pkt_truncated++;
-		}
-
-		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		buf->os_buf = NULL;
-
-		return;
-	}
-
-	if (eop && bytes_written > 0) {
-
-		/* Good receive
-		 */
-		rqstats->bytes += bytes_written;
-		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
-			buf->os_buf = NULL;
-			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
-					 buf->len, DMA_FROM_DEVICE);
-		}
-		prefetch(skb->data - NET_IP_ALIGN);
-
-		skb_put(skb, bytes_written);
-		skb->protocol = eth_type_trans(skb, netdev);
-		skb_record_rx_queue(skb, q_number);
-		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
-		    (type == 3)) {
-			switch (rss_type) {
-			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
-			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
-			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
-				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
-				rqstats->l4_rss_hash++;
-				break;
-			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
-			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
-			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
-				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
-				rqstats->l3_rss_hash++;
-				break;
-			}
-		}
-		if (enic->vxlan.vxlan_udp_port_number) {
-			switch (enic->vxlan.patch_level) {
-			case 0:
-				if (fcoe) {
-					encap = true;
-					outer_csum_ok = fcoe_fc_crc_ok;
-				}
-				break;
-			case 2:
-				if ((type == 7) &&
-				    (rss_hash & BIT(0))) {
-					encap = true;
-					outer_csum_ok = (rss_hash & BIT(1)) &&
-							(rss_hash & BIT(2));
-				}
-				break;
-			}
-		}
-
-		/* Hardware does not provide whole packet checksum. It only
-		 * provides pseudo checksum. Since hw validates the packet
-		 * checksum but not provide us the checksum value. use
-		 * CHECSUM_UNNECESSARY.
-		 *
-		 * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
-		 * inner csum_ok. outer_csum_ok is set by hw when outer udp
-		 * csum is correct or is zero.
-		 */
-		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
-		    tcp_udp_csum_ok && outer_csum_ok &&
-		    (ipv4_csum_ok || ipv6)) {
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			skb->csum_level = encap;
-			if (encap)
-				rqstats->csum_unnecessary_encap++;
-			else
-				rqstats->csum_unnecessary++;
-		}
-
-		if (vlan_stripped) {
-			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
-			rqstats->vlan_stripped++;
-		}
-		skb_mark_napi_id(skb, &enic->napi[rq->index]);
-		if (!(netdev->features & NETIF_F_GRO))
-			netif_receive_skb(skb);
-		else
-			napi_gro_receive(&enic->napi[q_number], skb);
-		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-			enic_intr_update_pkt_size(&cq->pkt_size_counter,
-						  bytes_written);
-	} else {
-
-		/* Buffer overflow
-		 */
-		rqstats->pkt_truncated++;
-		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		buf->os_buf = NULL;
-	}
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-	u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-	struct enic *enic = vnic_dev_priv(vdev);
-
-	vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
-			completed_index, VNIC_RQ_RETURN_DESC,
-			enic_rq_indicate_buf, opaque);
-
-	return 0;
-}
-
 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
 {
 	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
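The new enic_rq.c and enic_rq.h account for the remaining two changed files but are not expanded above. As orientation only (not the verbatim header from the commit), the header presumably declares the moved RX entry points with external linkage, since enic_main.c presumably still registers them through vnic_rq_fill(), vnic_rq_clean() and vnic_cq_service(); the names and signatures below are taken from the removed code, everything else is an assumption:

/* Hypothetical sketch of enic_rq.h; type declarations (struct vnic_dev,
 * struct vnic_rq, struct vnic_rq_buf, struct cq_desc) are assumed to come
 * from the vnic headers already included by enic_main.c.
 */
#ifndef _ENIC_RQ_H_
#define _ENIC_RQ_H_

/* CQ service callback passed to vnic_cq_service() by the NAPI poll loops */
int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
		    u16 q_number, u16 completed_index, void *opaque);

/* RQ buffer alloc/free callbacks passed to vnic_rq_fill()/vnic_rq_clean() */
int enic_rq_alloc_buf(struct vnic_rq *rq);
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);

#endif /* _ENIC_RQ_H_ */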
