Skip to content

Commit d24cb52

Browse files
johndale88 authored and kuba-moo committed
enic: Use the Page Pool API for RX
The Page Pool API improves bandwidth and CPU overhead by recycling pages instead of allocating new buffers in the driver. Make use of page pool fragment allocation for smaller MTUs so that multiple packets can share a page. For MTUs larger than PAGE_SIZE, adjust the 'order' page parameter so that contiguous pages can be used to receive the larger packets.

The RQ descriptor field 'os_buf' is repurposed to hold page pointers allocated from page_pool instead of SKBs. When packets arrive, SKBs are allocated and the page pointers are attached instead of preallocating SKBs.

The 'alloc_fail' netdev statistic is incremented when page_pool_dev_alloc() fails.

Co-developed-by: Nelson Escobar <[email protected]>
Signed-off-by: Nelson Escobar <[email protected]>
Co-developed-by: Satish Kharat <[email protected]>
Signed-off-by: Satish Kharat <[email protected]>
Signed-off-by: John Daley <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent eab3726 commit d24cb52

File tree

4 files changed

+71
-61
lines changed

4 files changed

+71
-61
lines changed

drivers/net/ethernet/cisco/enic/enic.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include "vnic_nic.h"
1818
#include "vnic_rss.h"
1919
#include <linux/irq.h>
20+
#include <net/page_pool/helpers.h>
2021

2122
#define DRV_NAME "enic"
2223
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -158,6 +159,7 @@ struct enic_rq_stats {
158159
u64 pkt_truncated; /* truncated pkts */
159160
u64 no_skb; /* out of skbs */
160161
u64 desc_skip; /* Rx pkt went into later buffer */
162+
u64 pp_alloc_fail; /* page pool alloc failure */
161163
};
162164

163165
struct enic_wq {
@@ -169,6 +171,7 @@ struct enic_wq {
169171
struct enic_rq {
170172
struct vnic_rq vrq;
171173
struct enic_rq_stats stats;
174+
struct page_pool *pool;
172175
} ____cacheline_aligned;
173176

174177
/* Per-instance private data structure */

drivers/net/ethernet/cisco/enic/enic_main.c

Lines changed: 31 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1736,6 +1736,17 @@ static int enic_open(struct net_device *netdev)
17361736
struct enic *enic = netdev_priv(netdev);
17371737
unsigned int i;
17381738
int err, ret;
1739+
unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
1740+
struct page_pool_params pp_params = {
1741+
.order = get_order(max_pkt_len),
1742+
.pool_size = enic->config.rq_desc_count,
1743+
.nid = dev_to_node(&enic->pdev->dev),
1744+
.dev = &enic->pdev->dev,
1745+
.dma_dir = DMA_FROM_DEVICE,
1746+
.max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
1747+
.netdev = netdev,
1748+
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
1749+
};
17391750

17401751
err = enic_request_intr(enic);
17411752
if (err) {
@@ -1753,6 +1764,16 @@ static int enic_open(struct net_device *netdev)
17531764
}
17541765

17551766
for (i = 0; i < enic->rq_count; i++) {
1767+
/* create a page pool for each RQ */
1768+
pp_params.napi = &enic->napi[i];
1769+
pp_params.queue_idx = i;
1770+
enic->rq[i].pool = page_pool_create(&pp_params);
1771+
if (IS_ERR(enic->rq[i].pool)) {
1772+
err = PTR_ERR(enic->rq[i].pool);
1773+
enic->rq[i].pool = NULL;
1774+
goto err_out_free_rq;
1775+
}
1776+
17561777
/* enable rq before updating rq desc */
17571778
vnic_rq_enable(&enic->rq[i].vrq);
17581779
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -1793,8 +1814,11 @@ static int enic_open(struct net_device *netdev)
17931814
err_out_free_rq:
17941815
for (i = 0; i < enic->rq_count; i++) {
17951816
ret = vnic_rq_disable(&enic->rq[i].vrq);
1796-
if (!ret)
1817+
if (!ret) {
17971818
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
1819+
page_pool_destroy(enic->rq[i].pool);
1820+
enic->rq[i].pool = NULL;
1821+
}
17981822
}
17991823
enic_dev_notify_unset(enic);
18001824
err_out_free_intr:
@@ -1852,8 +1876,11 @@ static int enic_stop(struct net_device *netdev)
18521876

18531877
for (i = 0; i < enic->wq_count; i++)
18541878
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
1855-
for (i = 0; i < enic->rq_count; i++)
1879+
for (i = 0; i < enic->rq_count; i++) {
18561880
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
1881+
page_pool_destroy(enic->rq[i].pool);
1882+
enic->rq[i].pool = NULL;
1883+
}
18571884
for (i = 0; i < enic->cq_count; i++)
18581885
vnic_cq_clean(&enic->cq[i]);
18591886
for (i = 0; i < enic->intr_count; i++)
@@ -2363,6 +2390,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
23632390
rxs->hw_drop_overruns = rqstats->pkt_truncated;
23642391
rxs->csum_unnecessary = rqstats->csum_unnecessary +
23652392
rqstats->csum_unnecessary_encap;
2393+
rxs->alloc_fail = rqstats->pp_alloc_fail;
23662394
}
23672395

23682396
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2390,6 +2418,7 @@ static void enic_get_base_stats(struct net_device *dev,
23902418
rxs->hw_drops = 0;
23912419
rxs->hw_drop_overruns = 0;
23922420
rxs->csum_unnecessary = 0;
2421+
rxs->alloc_fail = 0;
23932422
txs->bytes = 0;
23942423
txs->packets = 0;
23952424
txs->csum_none = 0;

drivers/net/ethernet/cisco/enic/enic_rq.c

Lines changed: 35 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -21,25 +21,6 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
2121
pkt_size->small_pkt_bytes_cnt += pkt_len;
2222
}
2323

24-
static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
25-
struct vnic_rq_buf *buf, u16 len)
26-
{
27-
struct enic *enic = netdev_priv(netdev);
28-
struct sk_buff *new_skb;
29-
30-
if (len > enic->rx_copybreak)
31-
return false;
32-
new_skb = netdev_alloc_skb_ip_align(netdev, len);
33-
if (!new_skb)
34-
return false;
35-
dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
36-
DMA_FROM_DEVICE);
37-
memcpy(new_skb->data, (*skb)->data, len);
38-
*skb = new_skb;
39-
40-
return true;
41-
}
42-
4324
int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
4425
u16 q_number, u16 completed_index, void *opaque)
4526
{
@@ -142,57 +123,56 @@ int enic_rq_alloc_buf(struct vnic_rq *rq)
142123
{
143124
struct enic *enic = vnic_dev_priv(rq->vdev);
144125
struct net_device *netdev = enic->netdev;
145-
struct sk_buff *skb;
126+
struct enic_rq *erq = &enic->rq[rq->index];
127+
struct enic_rq_stats *rqstats = &erq->stats;
128+
unsigned int offset = 0;
146129
unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
147130
unsigned int os_buf_index = 0;
148131
dma_addr_t dma_addr;
149132
struct vnic_rq_buf *buf = rq->to_use;
133+
struct page *page;
134+
unsigned int truesize = len;
150135

151136
if (buf->os_buf) {
152137
enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
153138
buf->len);
154139

155140
return 0;
156141
}
157-
skb = netdev_alloc_skb_ip_align(netdev, len);
158-
if (!skb) {
159-
enic->rq[rq->index].stats.no_skb++;
160-
return -ENOMEM;
161-
}
162142

163-
dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
164-
DMA_FROM_DEVICE);
165-
if (unlikely(enic_dma_map_check(enic, dma_addr))) {
166-
dev_kfree_skb(skb);
143+
page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
144+
if (unlikely(!page)) {
145+
rqstats->pp_alloc_fail++;
167146
return -ENOMEM;
168147
}
169-
170-
enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len);
148+
buf->offset = offset;
149+
buf->truesize = truesize;
150+
dma_addr = page_pool_get_dma_addr(page) + offset;
151+
enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
171152

172153
return 0;
173154
}
174155

175156
void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
176157
{
177158
struct enic *enic = vnic_dev_priv(rq->vdev);
159+
struct enic_rq *erq = &enic->rq[rq->index];
178160

179161
if (!buf->os_buf)
180162
return;
181163

182-
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
183-
DMA_FROM_DEVICE);
184-
dev_kfree_skb_any(buf->os_buf);
164+
page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
185165
buf->os_buf = NULL;
186166
}
187167

188168
void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
189169
struct vnic_rq_buf *buf, int skipped, void *opaque)
190170
{
191171
struct enic *enic = vnic_dev_priv(rq->vdev);
192-
struct net_device *netdev = enic->netdev;
193172
struct sk_buff *skb;
194173
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
195174
struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
175+
struct napi_struct *napi;
196176

197177
u8 type, color, eop, sop, ingress_port, vlan_stripped;
198178
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -208,8 +188,6 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
208188
return;
209189
}
210190

211-
skb = buf->os_buf;
212-
213191
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color,
214192
&q_number, &completed_index, &ingress_port, &fcoe,
215193
&eop, &sop, &rss_type, &csum_not_calc, &rss_hash,
@@ -219,48 +197,46 @@ void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc,
219197
&tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
220198
&fcs_ok);
221199

222-
if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written)) {
223-
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
224-
DMA_FROM_DEVICE);
225-
dev_kfree_skb_any(skb);
226-
buf->os_buf = NULL;
227-
200+
if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
228201
return;
229-
}
230202

231203
if (eop && bytes_written > 0) {
232204
/* Good receive
233205
*/
234206
rqstats->bytes += bytes_written;
235-
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
236-
buf->os_buf = NULL;
237-
dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
238-
buf->len, DMA_FROM_DEVICE);
207+
napi = &enic->napi[rq->index];
208+
skb = napi_get_frags(napi);
209+
if (unlikely(!skb)) {
210+
net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
211+
enic->netdev->name, rq->index,
212+
completed_index);
213+
rqstats->no_skb++;
214+
return;
239215
}
216+
240217
prefetch(skb->data - NET_IP_ALIGN);
241218

242-
skb_put(skb, bytes_written);
243-
skb->protocol = eth_type_trans(skb, netdev);
219+
dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
220+
bytes_written, DMA_FROM_DEVICE);
221+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
222+
(struct page *)buf->os_buf, buf->offset,
223+
bytes_written, buf->truesize);
244224
skb_record_rx_queue(skb, q_number);
245225
enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
246226
fcoe_fc_crc_ok, vlan_stripped,
247227
csum_not_calc, tcp_udp_csum_ok, ipv6,
248228
ipv4_csum_ok, vlan_tci, skb);
249-
skb_mark_napi_id(skb, &enic->napi[rq->index]);
250-
if (!(netdev->features & NETIF_F_GRO))
251-
netif_receive_skb(skb);
252-
else
253-
napi_gro_receive(&enic->napi[q_number], skb);
229+
skb_mark_for_recycle(skb);
230+
napi_gro_frags(napi);
254231
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
255232
enic_intr_update_pkt_size(&cq->pkt_size_counter,
256233
bytes_written);
234+
buf->os_buf = NULL;
235+
buf->dma_addr = 0;
236+
buf = buf->next;
257237
} else {
258238
/* Buffer overflow
259239
*/
260240
rqstats->pkt_truncated++;
261-
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
262-
DMA_FROM_DEVICE);
263-
dev_kfree_skb_any(skb);
264-
buf->os_buf = NULL;
265241
}
266242
}

drivers/net/ethernet/cisco/enic/vnic_rq.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
6161
unsigned int index;
6262
void *desc;
6363
uint64_t wr_id;
64+
unsigned int offset;
65+
unsigned int truesize;
6466
};
6567

6668
enum enic_poll_state {

0 commit comments

Comments (0)