Skip to content

Commit 7fc2bf7

Browse files
praveenkaligineedi authored and davem330 committed
gve: Changes to add new TX queues
Changes to enable adding and removing TX queues without calling gve_close() and gve_open(). Made the following changes: 1) priv->tx, priv->rx and priv->qpls arrays are allocated based on max tx queues and max rx queues 2) Changed gve_adminq_create_tx_queues(), gve_adminq_destroy_tx_queues(), gve_tx_alloc_rings() and gve_tx_free_rings() functions to add/remove a subset of TX queues rather than all the TX queues. Signed-off-by: Praveen Kaligineedi <[email protected]> Reviewed-by: Jeroen de Borst <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 2e80aea commit 7fc2bf7

File tree

6 files changed

+104
-50
lines changed

6 files changed

+104
-50
lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 31 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -798,16 +798,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
798798
return priv->rx_cfg.num_queues;
799799
}
800800

801+
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
802+
{
803+
return tx_qid;
804+
}
805+
806+
static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
807+
{
808+
return priv->tx_cfg.max_queues + rx_qid;
809+
}
810+
811+
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
812+
{
813+
return gve_tx_qpl_id(priv, 0);
814+
}
815+
816+
static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
817+
{
818+
return gve_rx_qpl_id(priv, 0);
819+
}
820+
801821
/* Returns a pointer to the next available tx qpl in the list of qpls
802822
*/
803823
static inline
804-
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
824+
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
805825
{
806-
int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
807-
priv->qpl_cfg.qpl_map_size);
826+
int id = gve_tx_qpl_id(priv, tx_qid);
808827

809-
/* we are out of tx qpls */
810-
if (id >= gve_num_tx_qpls(priv))
828+
/* QPL already in use */
829+
if (test_bit(id, priv->qpl_cfg.qpl_id_map))
811830
return NULL;
812831

813832
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -817,14 +836,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
817836
/* Returns a pointer to the next available rx qpl in the list of qpls
818837
*/
819838
static inline
820-
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
839+
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
821840
{
822-
int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
823-
priv->qpl_cfg.qpl_map_size,
824-
gve_num_tx_qpls(priv));
841+
int id = gve_rx_qpl_id(priv, rx_qid);
825842

826-
/* we are out of rx qpls */
827-
if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
843+
/* QPL already in use */
844+
if (test_bit(id, priv->qpl_cfg.qpl_id_map))
828845
return NULL;
829846

830847
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -843,7 +860,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
843860
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
844861
int id)
845862
{
846-
if (id < gve_num_tx_qpls(priv))
863+
if (id < gve_rx_start_qpl_id(priv))
847864
return DMA_TO_DEVICE;
848865
else
849866
return DMA_FROM_DEVICE;
@@ -869,8 +886,8 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
869886
/* tx handling */
870887
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
871888
bool gve_tx_poll(struct gve_notify_block *block, int budget);
872-
int gve_tx_alloc_rings(struct gve_priv *priv);
873-
void gve_tx_free_rings_gqi(struct gve_priv *priv);
889+
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
890+
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
874891
u32 gve_tx_load_event_counter(struct gve_priv *priv,
875892
struct gve_tx_ring *tx);
876893
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -516,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
516516
return gve_adminq_issue_cmd(priv, &cmd);
517517
}
518518

519-
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
519+
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
520520
{
521521
int err;
522522
int i;
523523

524-
for (i = 0; i < num_queues; i++) {
524+
for (i = start_id; i < start_id + num_queues; i++) {
525525
err = gve_adminq_create_tx_queue(priv, i);
526526
if (err)
527527
return err;
@@ -604,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
604604
return 0;
605605
}
606606

607-
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
607+
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
608608
{
609609
int err;
610610
int i;
611611

612-
for (i = 0; i < num_queues; i++) {
612+
for (i = start_id; i < start_id + num_queues; i++) {
613613
err = gve_adminq_destroy_tx_queue(priv, i);
614614
if (err)
615615
return err;

drivers/net/ethernet/google/gve/gve_adminq.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -410,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
410410
dma_addr_t db_array_bus_addr,
411411
u32 num_ntfy_blks);
412412
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
413-
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
414-
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
413+
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
414+
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
415415
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
416416
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
417417
int gve_adminq_register_page_list(struct gve_priv *priv,

drivers/net/ethernet/google/gve/gve_main.c

Lines changed: 60 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -584,11 +584,26 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
584584

585585
static int gve_register_qpls(struct gve_priv *priv)
586586
{
587-
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
587+
int start_id;
588588
int err;
589589
int i;
590590

591-
for (i = 0; i < num_qpls; i++) {
591+
start_id = gve_tx_start_qpl_id(priv);
592+
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
593+
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
594+
if (err) {
595+
netif_err(priv, drv, priv->dev,
596+
"failed to register queue page list %d\n",
597+
priv->qpls[i].id);
598+
/* This failure will trigger a reset - no need to clean
599+
* up
600+
*/
601+
return err;
602+
}
603+
}
604+
605+
start_id = gve_rx_start_qpl_id(priv);
606+
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
592607
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
593608
if (err) {
594609
netif_err(priv, drv, priv->dev,
@@ -605,11 +620,24 @@ static int gve_register_qpls(struct gve_priv *priv)
605620

606621
static int gve_unregister_qpls(struct gve_priv *priv)
607622
{
608-
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
623+
int start_id;
609624
int err;
610625
int i;
611626

612-
for (i = 0; i < num_qpls; i++) {
627+
start_id = gve_tx_start_qpl_id(priv);
628+
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
629+
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
630+
/* This failure will trigger a reset - no need to clean up */
631+
if (err) {
632+
netif_err(priv, drv, priv->dev,
633+
"Failed to unregister queue page list %d\n",
634+
priv->qpls[i].id);
635+
return err;
636+
}
637+
}
638+
639+
start_id = gve_rx_start_qpl_id(priv);
640+
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
613641
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
614642
/* This failure will trigger a reset - no need to clean up */
615643
if (err) {
@@ -628,7 +656,7 @@ static int gve_create_rings(struct gve_priv *priv)
628656
int err;
629657
int i;
630658

631-
err = gve_adminq_create_tx_queues(priv, num_tx_queues);
659+
err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
632660
if (err) {
633661
netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
634662
num_tx_queues);
@@ -695,10 +723,10 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
695723
}
696724
}
697725

698-
static void gve_tx_free_rings(struct gve_priv *priv)
726+
static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
699727
{
700728
if (gve_is_gqi(priv)) {
701-
gve_tx_free_rings_gqi(priv);
729+
gve_tx_free_rings_gqi(priv, start_id, num_rings);
702730
} else {
703731
gve_tx_free_rings_dqo(priv);
704732
}
@@ -709,20 +737,20 @@ static int gve_alloc_rings(struct gve_priv *priv)
709737
int err;
710738

711739
/* Setup tx rings */
712-
priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
740+
priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
713741
GFP_KERNEL);
714742
if (!priv->tx)
715743
return -ENOMEM;
716744

717745
if (gve_is_gqi(priv))
718-
err = gve_tx_alloc_rings(priv);
746+
err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
719747
else
720748
err = gve_tx_alloc_rings_dqo(priv);
721749
if (err)
722750
goto free_tx;
723751

724752
/* Setup rx rings */
725-
priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
753+
priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
726754
GFP_KERNEL);
727755
if (!priv->rx) {
728756
err = -ENOMEM;
@@ -747,7 +775,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
747775
kvfree(priv->rx);
748776
priv->rx = NULL;
749777
free_tx_queue:
750-
gve_tx_free_rings(priv);
778+
gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
751779
free_tx:
752780
kvfree(priv->tx);
753781
priv->tx = NULL;
@@ -759,7 +787,7 @@ static int gve_destroy_rings(struct gve_priv *priv)
759787
int num_tx_queues = gve_num_tx_queues(priv);
760788
int err;
761789

762-
err = gve_adminq_destroy_tx_queues(priv, num_tx_queues);
790+
err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
763791
if (err) {
764792
netif_err(priv, drv, priv->dev,
765793
"failed to destroy tx queues\n");
@@ -797,7 +825,7 @@ static void gve_free_rings(struct gve_priv *priv)
797825
ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
798826
gve_remove_napi(priv, ntfy_idx);
799827
}
800-
gve_tx_free_rings(priv);
828+
gve_tx_free_rings(priv, 0, num_tx_queues);
801829
kvfree(priv->tx);
802830
priv->tx = NULL;
803831
}
@@ -894,40 +922,46 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
894922
qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
895923

896924
kvfree(qpl->page_buses);
925+
qpl->page_buses = NULL;
897926
free_pages:
898927
kvfree(qpl->pages);
928+
qpl->pages = NULL;
899929
priv->num_registered_pages -= qpl->num_entries;
900930
}
901931

902932
static int gve_alloc_qpls(struct gve_priv *priv)
903933
{
904-
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
934+
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
935+
int start_id;
905936
int i, j;
906937
int err;
907938

908-
if (num_qpls == 0)
939+
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
909940
return 0;
910941

911-
priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
942+
priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
912943
if (!priv->qpls)
913944
return -ENOMEM;
914945

915-
for (i = 0; i < gve_num_tx_qpls(priv); i++) {
946+
start_id = gve_tx_start_qpl_id(priv);
947+
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
916948
err = gve_alloc_queue_page_list(priv, i,
917949
priv->tx_pages_per_qpl);
918950
if (err)
919951
goto free_qpls;
920952
}
921-
for (; i < num_qpls; i++) {
953+
954+
start_id = gve_rx_start_qpl_id(priv);
955+
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
922956
err = gve_alloc_queue_page_list(priv, i,
923957
priv->rx_data_slot_cnt);
924958
if (err)
925959
goto free_qpls;
926960
}
927961

928-
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
962+
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
929963
sizeof(unsigned long) * BITS_PER_BYTE;
930-
priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
964+
priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
931965
sizeof(unsigned long), GFP_KERNEL);
932966
if (!priv->qpl_cfg.qpl_id_map) {
933967
err = -ENOMEM;
@@ -940,23 +974,26 @@ static int gve_alloc_qpls(struct gve_priv *priv)
940974
for (j = 0; j <= i; j++)
941975
gve_free_queue_page_list(priv, j);
942976
kvfree(priv->qpls);
977+
priv->qpls = NULL;
943978
return err;
944979
}
945980

946981
static void gve_free_qpls(struct gve_priv *priv)
947982
{
948-
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
983+
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
949984
int i;
950985

951-
if (num_qpls == 0)
986+
if (!priv->qpls)
952987
return;
953988

954989
kvfree(priv->qpl_cfg.qpl_id_map);
990+
priv->qpl_cfg.qpl_id_map = NULL;
955991

956-
for (i = 0; i < num_qpls; i++)
992+
for (i = 0; i < max_queues; i++)
957993
gve_free_queue_page_list(priv, i);
958994

959995
kvfree(priv->qpls);
996+
priv->qpls = NULL;
960997
}
961998

962999
/* Use this to schedule a reset when the device is capable of continuing

drivers/net/ethernet/google/gve/gve_rx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
124124
return -ENOMEM;
125125

126126
if (!rx->data.raw_addressing) {
127-
rx->data.qpl = gve_assign_rx_qpl(priv);
127+
rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
128128
if (!rx->data.qpl) {
129129
kvfree(rx->data.page_info);
130130
rx->data.page_info = NULL;

drivers/net/ethernet/google/gve/gve_tx.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -195,7 +195,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
195195
tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
196196
tx->dev = &priv->pdev->dev;
197197
if (!tx->raw_addressing) {
198-
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
198+
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
199199
if (!tx->tx_fifo.qpl)
200200
goto abort_with_desc;
201201
/* map Tx FIFO */
@@ -233,12 +233,12 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
233233
return -ENOMEM;
234234
}
235235

236-
int gve_tx_alloc_rings(struct gve_priv *priv)
236+
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
237237
{
238238
int err = 0;
239239
int i;
240240

241-
for (i = 0; i < priv->tx_cfg.num_queues; i++) {
241+
for (i = start_id; i < start_id + num_rings; i++) {
242242
err = gve_tx_alloc_ring(priv, i);
243243
if (err) {
244244
netif_err(priv, drv, priv->dev,
@@ -251,17 +251,17 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
251251
if (err) {
252252
int j;
253253

254-
for (j = 0; j < i; j++)
254+
for (j = start_id; j < i; j++)
255255
gve_tx_free_ring(priv, j);
256256
}
257257
return err;
258258
}
259259

260-
void gve_tx_free_rings_gqi(struct gve_priv *priv)
260+
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
261261
{
262262
int i;
263263

264-
for (i = 0; i < priv->tx_cfg.num_queues; i++)
264+
for (i = start_id; i < start_id + num_rings; i++)
265265
gve_tx_free_ring(priv, i);
266266
}
267267

0 commit comments

Comments (0)