Skip to content

Commit 154bb2f

Browse files
committed
Merge branch 'ena-driver-xdp-bug-fixes'
David Arinzon says: ==================== ENA driver XDP bug fixes This patchset contains multiple XDP-related bug fixes in the ENA driver. ==================== Link: https://lore.kernel.org/r/[message-ID redacted by email protection] Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents f99cd56 + 4ab138c commit 154bb2f

File tree

2 files changed

+26
-30
lines changed

2 files changed

+26
-30
lines changed

drivers/net/ethernet/amazon/ena/ena_eth_com.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
328328
* compare it to the stored version, just create the meta
329329
*/
330330
if (io_sq->disable_meta_caching) {
331-
if (unlikely(!ena_tx_ctx->meta_valid))
332-
return -EINVAL;
333-
334331
*have_meta = true;
335332
return ena_com_create_meta(io_sq, ena_meta);
336333
}

drivers/net/ethernet/amazon/ena/ena_netdev.c

Lines changed: 26 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
7474
struct ena_tx_buffer *tx_info);
7575
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
7676
int first_index, int count);
77+
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
78+
int first_index, int count);
7779

7880
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
7981
static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
457459

458460
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
459461
{
462+
u32 xdp_first_ring = adapter->xdp_first_ring;
463+
u32 xdp_num_queues = adapter->xdp_num_queues;
460464
int rc = 0;
461465

462-
rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
463-
adapter->xdp_num_queues);
466+
rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
464467
if (rc)
465468
goto setup_err;
466469

467-
rc = ena_create_io_tx_queues_in_range(adapter,
468-
adapter->xdp_first_ring,
469-
adapter->xdp_num_queues);
470+
rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
470471
if (rc)
471472
goto create_err;
472473

473474
return 0;
474475

475476
create_err:
476-
ena_free_all_io_tx_resources(adapter);
477+
ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
477478
setup_err:
478479
return rc;
479480
}
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
14921493
if (unlikely(!skb))
14931494
return NULL;
14941495

1495-
/* sync this buffer for CPU use */
1496-
dma_sync_single_for_cpu(rx_ring->dev,
1497-
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1498-
len,
1499-
DMA_FROM_DEVICE);
15001496
skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
15011497
dma_sync_single_for_device(rx_ring->dev,
15021498
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
15151511

15161512
buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
15171513

1518-
pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1519-
15201514
/* If XDP isn't loaded try to reuse part of the RX buffer */
15211515
reuse_rx_buf_page = !is_xdp_loaded &&
15221516
ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
15231517

1524-
dma_sync_single_for_cpu(rx_ring->dev,
1525-
pre_reuse_paddr + pkt_offset,
1526-
len,
1527-
DMA_FROM_DEVICE);
1528-
15291518
if (!reuse_rx_buf_page)
15301519
ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
15311520

@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
16711660
}
16721661
}
16731662

1674-
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
1663+
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
16751664
{
16761665
struct ena_rx_buffer *rx_info;
16771666
int ret;
16781667

1668+
/* XDP multi-buffer packets not supported */
1669+
if (unlikely(num_descs > 1)) {
1670+
netdev_err_once(rx_ring->adapter->netdev,
1671+
"xdp: dropped unsupported multi-buffer packets\n");
1672+
ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
1673+
return ENA_XDP_DROP;
1674+
}
1675+
16791676
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
16801677
xdp_prepare_buff(xdp, page_address(rx_info->page),
16811678
rx_info->buf_offset,
16821679
rx_ring->ena_bufs[0].len, false);
1683-
/* If for some reason we received a bigger packet than
1684-
* we expect, then we simply drop it
1685-
*/
1686-
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1687-
return ENA_XDP_DROP;
16881680

16891681
ret = ena_xdp_execute(rx_ring, xdp);
16901682

@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
17191711
int xdp_flags = 0;
17201712
int total_len = 0;
17211713
int xdp_verdict;
1714+
u8 pkt_offset;
17221715
int rc = 0;
17231716
int i;
17241717

@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
17451738

17461739
/* First descriptor might have an offset set by the device */
17471740
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1748-
rx_info->buf_offset += ena_rx_ctx.pkt_offset;
1741+
pkt_offset = ena_rx_ctx.pkt_offset;
1742+
rx_info->buf_offset += pkt_offset;
17491743

17501744
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
17511745
"rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
17521746
rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
17531747
ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
17541748

1749+
dma_sync_single_for_cpu(rx_ring->dev,
1750+
dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1751+
rx_ring->ena_bufs[0].len,
1752+
DMA_FROM_DEVICE);
1753+
17551754
if (ena_xdp_present_ring(rx_ring))
1756-
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
1755+
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
17571756

17581757
/* allocate skb and fill it */
17591758
if (xdp_verdict == ENA_XDP_PASS)
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
17771776
if (xdp_verdict & ENA_XDP_FORWARDED) {
17781777
ena_unmap_rx_buff_attrs(rx_ring,
17791778
&rx_ring->rx_buffer_info[req_id],
1780-
0);
1779+
DMA_ATTR_SKIP_CPU_SYNC);
17811780
rx_ring->rx_buffer_info[req_id].page = NULL;
17821781
}
17831782
}

0 commit comments

Comments
 (0)