
Commit 92a6d7a

shailend-g authored and kuba-moo committed
gve: Refactor gve_open and gve_close
gve_open is rewritten to be composed of two functions: gve_queues_mem_alloc and gve_queues_start. The former only allocates queue resources without installing the queues; installation is taken up by the latter. Similarly, gve_close is split into gve_queues_stop and gve_queues_mem_free. Separating the act of allocating queue resources from the act of making the queues live helps with subsequent changes that aim to avoid taking down the datapath when applying new configurations.

Signed-off-by: Shailend Chand <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Reviewed-by: Jeroen de Borst <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent f13697c commit 92a6d7a
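
At a glance, the refactored entry points reduce to thin wrappers around the new helpers. The snippet below is assembled from the added lines in the diff that follows (indentation restored, comments lightly condensed); it is an orientation aid, not additional code beyond the patch:

static int gve_open(struct net_device *dev)
{
        struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
        struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
        struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
                                &tx_alloc_cfg, &rx_alloc_cfg);

        /* Allocate queue memory first... */
        err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
                                   &tx_alloc_cfg, &rx_alloc_cfg);
        if (err)
                return err;

        /* ...then make the queues live. Ownership of the memory passes to
         * gve_queues_start, which frees it itself on error.
         */
        err = gve_queues_start(priv, &qpls_alloc_cfg,
                               &tx_alloc_cfg, &rx_alloc_cfg);
        if (err)
                return err;

        return 0;
}

static int gve_close(struct net_device *dev)
{
        struct gve_priv *priv = netdev_priv(dev);
        int err;

        /* Tear down the datapath, then release the queue memory. */
        err = gve_queues_stop(priv);
        if (err)
                return err;

        gve_queues_mem_remove(priv);
        return 0;
}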

File tree

1 file changed: +119, -40 lines


drivers/net/ethernet/google/gve/gve_main.c

Lines changed: 119 additions & 40 deletions
@@ -1350,45 +1350,99 @@ static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
 	}
 }
 
-static int gve_open(struct net_device *dev)
+static void gve_queues_mem_free(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	int err;
+
+	err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+		return err;
+	}
+	tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+		goto free_qpls;
+	}
+
+	return 0;
+
+free_qpls:
+	gve_free_qpls(priv, qpls_alloc_cfg);
+	return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-	struct gve_priv *priv = netdev_priv(dev);
+
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+	gve_queues_mem_free(priv, &qpls_alloc_cfg,
+			    &tx_alloc_cfg, &rx_alloc_cfg);
+	priv->qpls = NULL;
+	priv->tx = NULL;
+	priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored into priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on errors.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+			    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	struct net_device *dev = priv->dev;
 	int err;
 
+	/* Record new resources into priv */
+	priv->qpls = qpls_alloc_cfg->qpls;
+	priv->tx = tx_alloc_cfg->tx;
+	priv->rx = rx_alloc_cfg->rx;
+
+	/* Record new configs into priv */
+	priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+	priv->tx_cfg = *tx_alloc_cfg->qcfg;
+	priv->rx_cfg = *rx_alloc_cfg->qcfg;
+	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
 	if (priv->xdp_prog)
 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
 	else
 		priv->num_xdp_queues = 0;
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	err = gve_alloc_qpls(priv, &qpls_alloc_cfg);
-	if (err)
-		return err;
-	priv->qpls = qpls_alloc_cfg.qpls;
-	tx_alloc_cfg.qpls = priv->qpls;
-	rx_alloc_cfg.qpls = priv->qpls;
-	err = gve_alloc_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	if (err)
-		goto free_qpls;
-
-	gve_tx_start_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_start_rings(priv, rx_alloc_cfg.qcfg->num_queues);
+	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
 	gve_init_sync_stats(priv);
 
 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_reg_xdp_info(priv, dev);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_register_qpls(priv);
 	if (err)
@@ -1416,37 +1470,53 @@ static int gve_open(struct net_device *dev)
 	priv->interface_up_cnt++;
 	return 0;
 
-free_rings:
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-free_qpls:
-	gve_free_qpls(priv, &qpls_alloc_cfg);
-	return err;
-
 reset:
-	/* This must have been called from a reset due to the rtnl lock
-	 * so just return at this point.
-	 */
 	if (gve_get_reset_in_progress(priv))
-		return err;
-	/* Otherwise reset before returning */
+		goto stop_and_free_rings;
 	gve_reset_and_teardown(priv, true);
 	/* if this fails there is nothing we can do so just ignore the return */
 	gve_reset_recovery(priv, false);
 	/* return the original error */
 	return err;
+stop_and_free_rings:
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+	gve_queues_mem_remove(priv);
+	return err;
 }
 
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
 	struct gve_priv *priv = netdev_priv(dev);
 	int err;
 
-	netif_carrier_off(dev);
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+
+	err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+				   &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	/* No need to free on error: ownership of resources is lost after
+	 * calling gve_queues_start.
+	 */
+	err = gve_queues_start(priv, &qpls_alloc_cfg,
+			       &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+	int err;
+
+	netif_carrier_off(priv->dev);
 	if (gve_get_device_rings_ok(priv)) {
 		gve_turndown(priv);
 		gve_drain_page_cache(priv);
@@ -1462,12 +1532,8 @@ static int gve_close(struct net_device *dev)
 
 	gve_unreg_xdp_info(priv);
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	gve_free_qpls(priv, &qpls_alloc_cfg);
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
 
 	priv->interface_down_cnt++;
 	return 0;
@@ -1483,6 +1549,19 @@ static int gve_close(struct net_device *dev)
 	return gve_reset_recovery(priv, false);
 }
 
+static int gve_close(struct net_device *dev)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = gve_queues_stop(priv);
+	if (err)
+		return err;
+
+	gve_queues_mem_remove(priv);
+	return 0;
+}
+
 static int gve_remove_xdp_queues(struct gve_priv *priv)
 {
 	int qpl_start_id;
