@@ -17,6 +17,7 @@
 #include <linux/utsname.h>
 #include <linux/version.h>
 #include <net/sch_generic.h>
+#include <net/xdp_sock_drv.h>
 #include "gve.h"
 #include "gve_dqo.h"
 #include "gve_adminq.h"
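The one new include, net/xdp_sock_drv.h, declares the driver-facing AF_XDP helpers used throughout this patch: xsk_get_pool_from_qid(), xsk_pool_dma_map()/xsk_pool_dma_unmap(), xsk_pool_get_rx_frame_size() and xsk_pool_set_rxq_info().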
@@ -1188,6 +1189,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 	struct gve_rx_ring *rx;
 	int err = 0;
 	int i, j;
+	u32 tx_qid;
 
 	if (!priv->num_xdp_queues)
 		return 0;
@@ -1204,6 +1206,24 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 						 MEM_TYPE_PAGE_SHARED, NULL);
 		if (err)
 			goto err;
+		rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
+		if (rx->xsk_pool) {
+			err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
+					       napi->napi_id);
+			if (err)
+				goto err;
+			err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+							 MEM_TYPE_XSK_BUFF_POOL, NULL);
+			if (err)
+				goto err;
+			xsk_pool_set_rxq_info(rx->xsk_pool,
+					      &rx->xsk_rxq);
+		}
+	}
+
+	for (i = 0; i < priv->num_xdp_queues; i++) {
+		tx_qid = gve_xdp_tx_queue_id(priv, i);
+		priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
 	}
 	return 0;
 
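Each XDP-capable RX queue now carries a second xdp_rxq_info, rx->xsk_rxq, registered with MEM_TYPE_XSK_BUFF_POOL alongside the existing MEM_TYPE_PAGE_SHARED registration, so that frames on a zero-copy queue are completed back into the user-mapped UMEM rather than the page allocator. xsk_pool_set_rxq_info() ties the bound pool to that rxq, and the second loop mirrors the pool onto the paired XDP TX ring (gve_xdp_tx_queue_id() maps XDP queue i to its dedicated TX queue).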
@@ -1212,13 +1232,15 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
 		rx = &priv->rx[j];
 		if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
 			xdp_rxq_info_unreg(&rx->xdp_rxq);
+		if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+			xdp_rxq_info_unreg(&rx->xsk_rxq);
 	}
 	return err;
 }
 
 static void gve_unreg_xdp_info(struct gve_priv *priv)
 {
-	int i;
+	int i, tx_qid;
 
 	if (!priv->num_xdp_queues)
 		return;
@@ -1227,6 +1249,15 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 		struct gve_rx_ring *rx = &priv->rx[i];
 
 		xdp_rxq_info_unreg(&rx->xdp_rxq);
+		if (rx->xsk_pool) {
+			xdp_rxq_info_unreg(&rx->xsk_rxq);
+			rx->xsk_pool = NULL;
+		}
+	}
+
+	for (i = 0; i < priv->num_xdp_queues; i++) {
+		tx_qid = gve_xdp_tx_queue_id(priv, i);
+		priv->tx[tx_qid].xsk_pool = NULL;
 	}
 }
 
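Teardown mirrors registration: both the error path of gve_reg_xdp_info() and gve_unreg_xdp_info() unregister rx->xsk_rxq in addition to rx->xdp_rxq, and the TX-side xsk_pool pointers are cleared so the transmit path stops treating those rings as zero-copy.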
@@ -1469,6 +1500,140 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
 	return err;
 }
 
+static int gve_xsk_pool_enable(struct net_device *dev,
+			       struct xsk_buff_pool *pool,
+			       u16 qid)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	struct napi_struct *napi;
+	struct gve_rx_ring *rx;
+	int tx_qid;
+	int err;
+
+	if (qid >= priv->rx_cfg.num_queues) {
+		dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
+		return -EINVAL;
+	}
+	if (xsk_pool_get_rx_frame_size(pool) <
+	    priv->dev->max_mtu + sizeof(struct ethhdr)) {
+		dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
+		return -EINVAL;
+	}
+
+	err = xsk_pool_dma_map(pool, &priv->pdev->dev,
+			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+	if (err)
+		return err;
+
+	/* If XDP prog is not installed, return */
+	if (!priv->xdp_prog)
+		return 0;
+
+	rx = &priv->rx[qid];
+	napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+	err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
+	if (err)
+		goto err;
+
+	err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+					 MEM_TYPE_XSK_BUFF_POOL, NULL);
+	if (err)
+		goto err;
+
+	xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
+	rx->xsk_pool = pool;
+
+	tx_qid = gve_xdp_tx_queue_id(priv, qid);
+	priv->tx[tx_qid].xsk_pool = pool;
+
+	return 0;
+err:
+	if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+		xdp_rxq_info_unreg(&rx->xsk_rxq);
+
+	xsk_pool_dma_unmap(pool,
+			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+	return err;
+}
+
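gve_xsk_pool_enable() validates the queue id and checks that one XSK frame can hold a maximally-sized packet (max_mtu plus an Ethernet header) before DMA-mapping the pool. If no XDP program is installed yet, it returns early with the pool mapped but unregistered; gve_reg_xdp_info() will pick the pool up via xsk_get_pool_from_qid() once a program is attached and the queues are (re)registered. The err label unwinds both the rxq registration and the DMA mapping.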
+static int gve_xsk_pool_disable(struct net_device *dev,
+				u16 qid)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	struct napi_struct *napi_rx;
+	struct napi_struct *napi_tx;
+	struct xsk_buff_pool *pool;
+	int tx_qid;
+
+	pool = xsk_get_pool_from_qid(dev, qid);
+	if (!pool)
+		return -EINVAL;
+	if (qid >= priv->rx_cfg.num_queues)
+		return -EINVAL;
+
+	/* If XDP prog is not installed, unmap DMA and return */
+	if (!priv->xdp_prog)
+		goto done;
+
+	tx_qid = gve_xdp_tx_queue_id(priv, qid);
+	if (!netif_running(dev)) {
+		priv->rx[qid].xsk_pool = NULL;
+		xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+		priv->tx[tx_qid].xsk_pool = NULL;
+		goto done;
+	}
+
+	napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+	napi_disable(napi_rx); /* make sure current rx poll is done */
+
+	napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+	napi_disable(napi_tx); /* make sure current tx poll is done */
+
+	priv->rx[qid].xsk_pool = NULL;
+	xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+	priv->tx[tx_qid].xsk_pool = NULL;
+	smp_mb(); /* Make sure it is visible to the workers on datapath */
+
+	napi_enable(napi_rx);
+	if (gve_rx_work_pending(&priv->rx[qid]))
+		napi_schedule(napi_rx);
+
+	napi_enable(napi_tx);
+	if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+		napi_schedule(napi_tx);
+
+done:
+	xsk_pool_dma_unmap(pool,
+			   DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+	return 0;
+}
+
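Disabling has to pull the pool out from under a live datapath. With the device running, both the RX NAPI and the paired XDP-TX NAPI are disabled so no poll can be mid-flight, the pool pointers are cleared and the xsk rxq unregistered, and smp_mb() orders those NULL stores before the NAPIs are re-enabled. The explicit napi_schedule() calls catch any work that arrived while NAPI was off. The DMA unmap runs last, on the done path shared with the no-XDP-program case.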
+static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+
+	if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+		return -EINVAL;
+
+	if (flags & XDP_WAKEUP_TX) {
+		struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
+		struct napi_struct *napi =
+			&priv->ntfy_blocks[tx->ntfy_id].napi;
+
+		if (!napi_if_scheduled_mark_missed(napi)) {
+			/* Call local_bh_enable to trigger SoftIRQ processing */
+			local_bh_disable();
+			napi_schedule(napi);
+			local_bh_enable();
+		}
+
+		tx->xdp_xsk_wakeup++;
+	}
+
+	return 0;
+}
+
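gve_xsk_wakeup() implements the need-wakeup protocol on the TX side: if the NAPI is already scheduled, napi_if_scheduled_mark_missed() just flags it to re-poll; otherwise the NAPI is scheduled inside a local_bh_disable()/local_bh_enable() pair so the softirq runs promptly. tx->xdp_xsk_wakeup is a statistics counter. For context, here is a hypothetical userspace sketch (not part of this patch) of how an AF_XDP socket would exercise this path; UMEM and ring setup are elided, and the two helper names are mine:

/* Hypothetical userspace sketch: binding an AF_XDP socket in zero-copy
 * mode and kicking TX. For a complete example see libxdp or the kernel
 * selftests under tools/testing/selftests/bpf/.
 */
#include <linux/if_xdp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

int xsk_bind_zerocopy(int xsk_fd, const char *ifname, __u32 queue_id)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = queue_id;
	/* XDP_ZEROCOPY reaches gve_xsk_pool_enable() via the
	 * XDP_SETUP_XSK_POOL command; XDP_USE_NEED_WAKEUP makes the
	 * kernel ask userspace to kick the driver when TX needs service.
	 */
	sxdp.sxdp_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP;

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}

void xsk_kick_tx(int xsk_fd)
{
	/* The empty sendto() is the conventional TX kick; in the kernel
	 * it lands in ndo_xsk_wakeup(), i.e. gve_xsk_wakeup() above.
	 */
	(void)sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}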
 static int verify_xdp_configuration(struct net_device *dev)
 {
 	struct gve_priv *priv = netdev_priv(dev);
@@ -1512,6 +1677,11 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return gve_set_xdp(priv, xdp->prog, xdp->extack);
+	case XDP_SETUP_XSK_POOL:
+		if (xdp->xsk.pool)
+			return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
+		else
+			return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
 	default:
 		return -EINVAL;
 	}
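The core reaches this hook from xsk_bind(): xp_assign_dev() in net/xdp/xsk_buff_pool.c issues ndo_bpf with XDP_SETUP_XSK_POOL, passing the pool to enable zero-copy on a queue or NULL to tear it down.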
@@ -1713,6 +1883,7 @@ static const struct net_device_ops gve_netdev_ops = {
 	.ndo_set_features	=	gve_set_features,
 	.ndo_bpf		=	gve_xdp,
 	.ndo_xdp_xmit		=	gve_xdp_xmit,
+	.ndo_xsk_wakeup		=	gve_xsk_wakeup,
 };
 
 static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -1838,6 +2009,7 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
 		priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
 		priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
 		priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+		priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
 	} else {
 		priv->dev->xdp_features = 0;
 	}
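NETDEV_XDP_ACT_XSK_ZEROCOPY advertises AF_XDP zero-copy through the netdev XDP feature flags, letting userspace (e.g. libxdp) detect that binding with XDP_ZEROCOPY should succeed on this device.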