@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
 			      struct ena_tx_buffer *tx_info);
 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
 					    int first_index, int count);
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+						   int first_index, int count);
 
 /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
 static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
 
 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
 {
+	u32 xdp_first_ring = adapter->xdp_first_ring;
+	u32 xdp_num_queues = adapter->xdp_num_queues;
 	int rc = 0;
 
-	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
-					     adapter->xdp_num_queues);
+	rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
 	if (rc)
 		goto setup_err;
 
-	rc = ena_create_io_tx_queues_in_range(adapter,
-					      adapter->xdp_first_ring,
-					      adapter->xdp_num_queues);
+	rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
 	if (rc)
 		goto create_err;
 
 	return 0;
 
 create_err:
-	ena_free_all_io_tx_resources(adapter);
+	ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
 setup_err:
 	return rc;
 }
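For context, the new range-based free declared above would most likely just walk the requested span of TX queues. A minimal sketch, assuming a per-queue helper along the lines of the driver's existing ena_free_tx_resources(adapter, qid); the real body may differ:

/* Sketch only -- assumes an existing per-queue helper
 * ena_free_tx_resources(adapter, qid) in ena_netdev.c.
 */
static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						   int first_index, int count)
{
	int i;

	/* Free only the queues this caller actually set up. */
	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}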
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		if (unlikely(!skb))
 			return NULL;
 
-		/* sync this buffer for CPU use */
-		dma_sync_single_for_cpu(rx_ring->dev,
-					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
-					len,
-					DMA_FROM_DEVICE);
 		skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
 		dma_sync_single_for_device(rx_ring->dev,
 					   dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
 
-	pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
-
 	/* If XDP isn't loaded try to reuse part of the RX buffer */
 	reuse_rx_buf_page = !is_xdp_loaded &&
 			    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
 
-	dma_sync_single_for_cpu(rx_ring->dev,
-				pre_reuse_paddr + pkt_offset,
-				len,
-				DMA_FROM_DEVICE);
-
 	if (!reuse_rx_buf_page)
 		ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
 
@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
 	}
 }
 
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
 {
 	struct ena_rx_buffer *rx_info;
 	int ret;
 
+	/* XDP multi-buffer packets not supported */
+	if (unlikely(num_descs > 1)) {
+		netdev_err_once(rx_ring->adapter->netdev,
+				"xdp: dropped unsupported multi-buffer packets\n");
+		ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+		return ENA_XDP_DROP;
+	}
+
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
 	xdp_prepare_buff(xdp, page_address(rx_info->page),
 			 rx_info->buf_offset,
 			 rx_ring->ena_bufs[0].len, false);
-	/* If for some reason we received a bigger packet than
-	 * we expect, then we simply drop it
-	 */
-	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-		return ENA_XDP_DROP;
 
 	ret = ena_xdp_execute(rx_ring, xdp);
 
@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	int xdp_flags = 0;
 	int total_len = 0;
 	int xdp_verdict;
+	u8 pkt_offset;
 	int rc = 0;
 	int i;
 
@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 
 		/* First descriptor might have an offset set by the device */
 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-		rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+		pkt_offset = ena_rx_ctx.pkt_offset;
+		rx_info->buf_offset += pkt_offset;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
 			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
 
+		dma_sync_single_for_cpu(rx_ring->dev,
+					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+					rx_ring->ena_bufs[0].len,
+					DMA_FROM_DEVICE);
+
 		if (ena_xdp_present_ring(rx_ring))
-			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
 
 		/* allocate skb and fill it */
 		if (xdp_verdict == ENA_XDP_PASS)
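The hunk above hoists the CPU sync out of ena_rx_skb() and into the poll loop, so the buffer is made CPU-visible once, before either an attached XDP program or the skb path reads it. A minimal sketch of the ownership hand-off this relies on, with placeholder names rather than driver code:

#include <linux/dma-mapping.h>

/* Illustrative only -- dev/paddr/offset/len are placeholders: a streaming
 * RX buffer must be synced for the CPU before any read (including by an
 * XDP program) and handed back to the device before the buffer is reused.
 */
static void rx_buf_sync_around_cpu_read(struct device *dev, dma_addr_t paddr,
					u32 offset, u32 len)
{
	dma_sync_single_for_cpu(dev, paddr + offset, len, DMA_FROM_DEVICE);
	/* ... CPU (or XDP program) reads the packet bytes here ... */
	dma_sync_single_for_device(dev, paddr + offset, len, DMA_FROM_DEVICE);
}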
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		if (xdp_verdict & ENA_XDP_FORWARDED) {
 			ena_unmap_rx_buff_attrs(rx_ring,
 						&rx_ring->rx_buffer_info[req_id],
-						0);
+						DMA_ATTR_SKIP_CPU_SYNC);
 			rx_ring->rx_buffer_info[req_id].page = NULL;
 		}
 	}
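With the sync now done up front in the poll loop, the unmap for forwarded XDP buffers can skip the implicit CPU sync. A sketch of the attrs-based unmap pattern this corresponds to, using placeholder arguments rather than the driver's own wrapper:

#include <linux/dma-mapping.h>

/* Sketch with placeholder values: skipping the CPU sync on unmap is only
 * safe when dma_sync_single_for_cpu() already covered the bytes that the
 * CPU (or XDP program) actually read.
 */
static void rx_page_unmap_no_sync(struct device *dev, dma_addr_t paddr,
				  size_t size)
{
	dma_unmap_page_attrs(dev, paddr, size, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}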