@@ -584,11 +584,26 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
 
 static int gve_register_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int start_id;
 	int err;
 	int i;
 
-	for (i = 0; i < num_qpls; i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+		if (err) {
+			netif_err(priv, drv, priv->dev,
+				  "failed to register queue page list %d\n",
+				  priv->qpls[i].id);
+			/* This failure will trigger a reset - no need to clean
+			 * up
+			 */
+			return err;
+		}
+	}
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -605,11 +620,24 @@ static int gve_register_qpls(struct gve_priv *priv)
 
 static int gve_unregister_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int start_id;
 	int err;
 	int i;
 
-	for (i = 0; i < num_qpls; i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+		/* This failure will trigger a reset - no need to clean up */
+		if (err) {
+			netif_err(priv, drv, priv->dev,
+				  "Failed to unregister queue page list %d\n",
+				  priv->qpls[i].id);
+			return err;
+		}
+	}
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
 		/* This failure will trigger a reset - no need to clean up */
 		if (err) {
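Note on the loop bounds in the two hunks above: the patch replaces the single flat QPL loop with per-range loops driven by gve_tx_start_qpl_id() and gve_rx_start_qpl_id(). A minimal sketch of the id layout these loops appear to assume, with TX QPL ids starting at 0 and the RX ids following the TX range (illustrative bodies, not copied from the driver):

	/* Assumed id-space partition: TX QPLs first, RX QPLs after them. */
	static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
	{
		return 0;	/* TX QPLs occupy [0, tx_cfg.max_queues) */
	}

	static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
	{
		return priv->tx_cfg.max_queues;	/* RX QPLs follow the TX range */
	}

Under that layout the TX loop walks [0, gve_num_tx_qpls(priv)) and the RX loop walks [tx_cfg.max_queues, tx_cfg.max_queues + gve_num_rx_qpls(priv)), which also explains why gve_alloc_qpls() later in this patch sizes the array by tx_cfg.max_queues + rx_cfg.max_queues.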
@@ -628,7 +656,7 @@ static int gve_create_rings(struct gve_priv *priv)
 	int err;
 	int i;
 
-	err = gve_adminq_create_tx_queues(priv, num_tx_queues);
+	err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
 			  num_tx_queues);
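The extra 0 argument here (and in gve_destroy_rings() below) suggests the adminq helpers now take a starting queue id so a subrange of queues can be created or destroyed; the callers in this file keep the old full-range behaviour by passing 0. Presumed shape of the updated declarations (the parameter names and u32 types are assumptions, not taken from this diff):

	int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
	int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);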
@@ -695,10 +723,10 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
 	}
 }
 
-static void gve_tx_free_rings(struct gve_priv *priv)
+static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
 {
 	if (gve_is_gqi(priv)) {
-		gve_tx_free_rings_gqi(priv);
+		gve_tx_free_rings_gqi(priv, start_id, num_rings);
 	} else {
 		gve_tx_free_rings_dqo(priv);
 	}
@@ -709,20 +737,20 @@ static int gve_alloc_rings(struct gve_priv *priv)
 	int err;
 
 	/* Setup tx rings */
-	priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
+	priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
 			    GFP_KERNEL);
 	if (!priv->tx)
 		return -ENOMEM;
 
 	if (gve_is_gqi(priv))
-		err = gve_tx_alloc_rings(priv);
+		err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
 	else
 		err = gve_tx_alloc_rings_dqo(priv);
 	if (err)
 		goto free_tx;
 
 	/* Setup rx rings */
-	priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
+	priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
 			    GFP_KERNEL);
 	if (!priv->rx) {
 		err = -ENOMEM;
@@ -747,7 +775,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
 	kvfree(priv->rx);
 	priv->rx = NULL;
 free_tx_queue:
-	gve_tx_free_rings(priv);
+	gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
 free_tx:
 	kvfree(priv->tx);
 	priv->tx = NULL;
@@ -759,7 +787,7 @@ static int gve_destroy_rings(struct gve_priv *priv)
 	int num_tx_queues = gve_num_tx_queues(priv);
 	int err;
 
-	err = gve_adminq_destroy_tx_queues(priv, num_tx_queues);
+	err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
 	if (err) {
 		netif_err(priv, drv, priv->dev,
 			  "failed to destroy tx queues\n");
@@ -797,7 +825,7 @@ static void gve_free_rings(struct gve_priv *priv)
 		ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
 		gve_remove_napi(priv, ntfy_idx);
 	}
-	gve_tx_free_rings(priv);
+	gve_tx_free_rings(priv, 0, num_tx_queues);
 	kvfree(priv->tx);
 	priv->tx = NULL;
 }
@@ -894,40 +922,46 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
 			      qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
 
 	kvfree(qpl->page_buses);
+	qpl->page_buses = NULL;
 free_pages:
 	kvfree(qpl->pages);
+	qpl->pages = NULL;
 	priv->num_registered_pages -= qpl->num_entries;
 }
 
 static int gve_alloc_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+	int start_id;
 	int i, j;
 	int err;
 
-	if (num_qpls == 0)
+	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
 		return 0;
 
-	priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
+	priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
 	if (!priv->qpls)
 		return -ENOMEM;
 
-	for (i = 0; i < gve_num_tx_qpls(priv); i++) {
+	start_id = gve_tx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
 		err = gve_alloc_queue_page_list(priv, i,
 						priv->tx_pages_per_qpl);
 		if (err)
 			goto free_qpls;
 	}
-	for (; i < num_qpls; i++) {
+
+	start_id = gve_rx_start_qpl_id(priv);
+	for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
 		err = gve_alloc_queue_page_list(priv, i,
 						priv->rx_data_slot_cnt);
 		if (err)
 			goto free_qpls;
 	}
 
-	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
+	priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
 		sizeof(unsigned long) * BITS_PER_BYTE;
-	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
+	priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
 					    sizeof(unsigned long), GFP_KERNEL);
 	if (!priv->qpl_cfg.qpl_id_map) {
 		err = -ENOMEM;
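Two behavioural shifts in this hunk are worth calling out: allocation is now sized by the maximum queue counts rather than the currently configured ones, so the qpl array and id bitmap need not be reallocated when the queue count changes at runtime; and the early-return test switches from "no QPLs configured" to an explicit queue-format check, since only the GVE_GQI_QPL_FORMAT path uses queue page lists at all. A worked sizing illustration (example values, not from the driver):

	/* e.g. tx_cfg.max_queues = 16, rx_cfg.max_queues = 16:
	 *   max_queues   = 32 QPL slots allocated up front,
	 *   qpl_id_map   = BITS_TO_LONGS(32) longs, one bit per QPL id.
	 */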
@@ -940,23 +974,26 @@ static int gve_alloc_qpls(struct gve_priv *priv)
 	for (j = 0; j <= i; j++)
 		gve_free_queue_page_list(priv, j);
 	kvfree(priv->qpls);
+	priv->qpls = NULL;
 	return err;
 }
 
 static void gve_free_qpls(struct gve_priv *priv)
 {
-	int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+	int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
 	int i;
 
-	if (num_qpls == 0)
+	if (!priv->qpls)
 		return;
 
 	kvfree(priv->qpl_cfg.qpl_id_map);
+	priv->qpl_cfg.qpl_id_map = NULL;
 
-	for (i = 0; i < num_qpls; i++)
+	for (i = 0; i < max_queues; i++)
 		gve_free_queue_page_list(priv, i);
 
 	kvfree(priv->qpls);
+	priv->qpls = NULL;
 }
 
 /* Use this to schedule a reset when the device is capable of continuing
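The kvfree()-then-NULL pattern added throughout these teardown paths makes the frees safe to reach more than once: kvfree(NULL) is a no-op, so a later call on a reset path that finds the pointer already cleared simply does nothing, and the new !priv->qpls check in gve_free_qpls() can stand in for the old num_qpls == 0 test. A hypothetical helper showing the idiom (not driver code):

	static void gve_kvfree_and_clear(void **ptr)
	{
		kvfree(*ptr);	/* kvfree(NULL) is a harmless no-op */
		*ptr = NULL;	/* later frees and NULL checks stay safe */
	}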