@@ -1350,45 +1350,99 @@ static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
 	}
 }
 
-static int gve_open(struct net_device *dev)
+static void gve_queues_mem_free(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+				struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+				struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+				struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	int err;
+
+	err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+		return err;
+	}
+	tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+	err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+	if (err) {
+		netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+		goto free_qpls;
+	}
+
+	return 0;
+
+free_qpls:
+	gve_free_qpls(priv, qpls_alloc_cfg);
+	return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-	struct gve_priv *priv = netdev_priv(dev);
+
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+	gve_queues_mem_free(priv, &qpls_alloc_cfg,
+			    &tx_alloc_cfg, &rx_alloc_cfg);
+	priv->qpls = NULL;
+	priv->tx = NULL;
+	priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored into priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on errors.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+			    struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+			    struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+			    struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+	struct net_device *dev = priv->dev;
 	int err;
 
+	/* Record new resources into priv */
+	priv->qpls = qpls_alloc_cfg->qpls;
+	priv->tx = tx_alloc_cfg->tx;
+	priv->rx = rx_alloc_cfg->rx;
+
+	/* Record new configs into priv */
+	priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+	priv->tx_cfg = *tx_alloc_cfg->qcfg;
+	priv->rx_cfg = *rx_alloc_cfg->qcfg;
+	priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+	priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
 	if (priv->xdp_prog)
 		priv->num_xdp_queues = priv->rx_cfg.num_queues;
 	else
 		priv->num_xdp_queues = 0;
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	err = gve_alloc_qpls(priv, &qpls_alloc_cfg);
-	if (err)
-		return err;
-	priv->qpls = qpls_alloc_cfg.qpls;
-	tx_alloc_cfg.qpls = priv->qpls;
-	rx_alloc_cfg.qpls = priv->qpls;
-	err = gve_alloc_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	if (err)
-		goto free_qpls;
-
-	gve_tx_start_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_start_rings(priv, rx_alloc_cfg.qcfg->num_queues);
+	gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+	gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
 	gve_init_sync_stats(priv);
 
 	err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 	err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_reg_xdp_info(priv, dev);
 	if (err)
-		goto free_rings;
+		goto stop_and_free_rings;
 
 	err = gve_register_qpls(priv);
 	if (err)
@@ -1416,37 +1470,53 @@ static int gve_open(struct net_device *dev)
 	priv->interface_up_cnt++;
 	return 0;
 
-free_rings:
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-free_qpls:
-	gve_free_qpls(priv, &qpls_alloc_cfg);
-	return err;
-
 reset:
-	/* This must have been called from a reset due to the rtnl lock
-	 * so just return at this point.
-	 */
 	if (gve_get_reset_in_progress(priv))
-		return err;
-	/* Otherwise reset before returning */
+		goto stop_and_free_rings;
 	gve_reset_and_teardown(priv, true);
 	/* if this fails there is nothing we can do so just ignore the return */
 	gve_reset_recovery(priv, false);
 	/* return the original error */
 	return err;
+stop_and_free_rings:
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+	gve_queues_mem_remove(priv);
+	return err;
 }
 
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
 {
 	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
 	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
 	struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
 	struct gve_priv *priv = netdev_priv(dev);
 	int err;
 
-	netif_carrier_off(dev);
+	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+				&tx_alloc_cfg, &rx_alloc_cfg);
+
+	err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+				   &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	/* No need to free on error: ownership of resources is lost after
+	 * calling gve_queues_start.
+	 */
+	err = gve_queues_start(priv, &qpls_alloc_cfg,
+			       &tx_alloc_cfg, &rx_alloc_cfg);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+	int err;
+
+	netif_carrier_off(priv->dev);
 	if (gve_get_device_rings_ok(priv)) {
 		gve_turndown(priv);
 		gve_drain_page_cache(priv);
@@ -1462,12 +1532,8 @@ static int gve_close(struct net_device *dev)
 
 	gve_unreg_xdp_info(priv);
 
-	gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
-				&tx_alloc_cfg, &rx_alloc_cfg);
-	gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
-	gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
-	gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-	gve_free_qpls(priv, &qpls_alloc_cfg);
+	gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+	gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
 
 	priv->interface_down_cnt++;
 	return 0;
@@ -1483,6 +1549,19 @@ static int gve_close(struct net_device *dev)
 	return gve_reset_recovery(priv, false);
 }
 
+static int gve_close(struct net_device *dev)
+{
+	struct gve_priv *priv = netdev_priv(dev);
+	int err;
+
+	err = gve_queues_stop(priv);
+	if (err)
+		return err;
+
+	gve_queues_mem_remove(priv);
+	return 0;
+}
+
 static int gve_remove_xdp_queues(struct gve_priv *priv)
 {
 	int qpl_start_id;
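
The ownership convention these helpers adopt — the caller allocates queue memory up front, and gve_queues_start() either records it into priv or frees it on failure, so the caller never frees after the call — can be illustrated with the minimal, stand-alone C sketch below. It is not driver code; every name in it (engine, buf_alloc, engine_start, engine_stop) is invented for illustration only.

#include <stdlib.h>

struct engine {
	int *buf;	/* owned by the engine once started */
};

/* Caller-side allocation; on success the caller owns *bufp. */
static int buf_alloc(int **bufp, size_t n)
{
	*bufp = calloc(n, sizeof(**bufp));
	return *bufp ? 0 : -1;
}

/* Mirrors the gve_queues_start() contract: the passed-in memory is either
 * recorded into the object (ownership transferred) or freed on error,
 * so the caller must not free it afterwards.
 */
static int engine_start(struct engine *e, int *buf, int fail)
{
	if (fail) {
		free(buf);	/* free passed-in memory on error */
		return -1;
	}
	e->buf = buf;		/* record resource; engine now owns it */
	return 0;
}

/* Mirrors gve_queues_stop()/gve_queues_mem_remove(): the object releases
 * what it owns and drops its reference.
 */
static void engine_stop(struct engine *e)
{
	free(e->buf);
	e->buf = NULL;
}

int main(void)
{
	struct engine e = {0};
	int *buf;

	if (buf_alloc(&buf, 16))
		return 1;
	/* No need to free on error: ownership is lost after engine_start. */
	if (engine_start(&e, buf, 0))
		return 1;
	engine_stop(&e);
	return 0;
}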