@@ -39,7 +39,8 @@ void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
-			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
+			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
+			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
 	u16 option_length = be16_to_cpu(option->option_length);
@@ -112,6 +113,22 @@ void gve_parse_device_option(struct gve_priv *priv,
 		}
 		*dev_op_dqo_rda = (void *)(option + 1);
 		break;
+	case GVE_DEV_OPT_ID_DQO_QPL:
+		if (option_length < sizeof(**dev_op_dqo_qpl) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_dqo_qpl)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
+		}
+		*dev_op_dqo_qpl = (void *)(option + 1);
+		break;
 	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
 		if (option_length < sizeof(**dev_op_jumbo_frames) ||
 		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
@@ -146,7 +163,8 @@ gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
-			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
+			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
+			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
 	struct gve_device_option *dev_opt;
@@ -166,7 +184,8 @@ gve_process_device_options(struct gve_priv *priv,
 
 		gve_parse_device_option(priv, descriptor, dev_opt,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
-					dev_op_dqo_rda, dev_op_jumbo_frames);
+					dev_op_dqo_rda, dev_op_jumbo_frames,
+					dev_op_dqo_qpl);
 		dev_opt = next_opt;
 	}
 
@@ -505,12 +524,24 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 
 		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 	} else {
+		u16 comp_ring_size;
+		u32 qpl_id = 0;
+
+		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
+			comp_ring_size =
+				priv->options_dqo_rda.tx_comp_ring_entries;
+		} else {
+			qpl_id = tx->dqo.qpl->id;
+			comp_ring_size = priv->tx_desc_cnt;
+		}
+		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 		cmd.create_tx_queue.tx_ring_size =
 			cpu_to_be16(priv->tx_desc_cnt);
 		cmd.create_tx_queue.tx_comp_ring_addr =
 			cpu_to_be64(tx->complq_bus_dqo);
 		cmd.create_tx_queue.tx_comp_ring_size =
-			cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
+			cpu_to_be16(comp_ring_size);
 	}
 
 	return gve_adminq_issue_cmd(priv, &cmd);
@@ -555,6 +586,18 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
 	} else {
+		u16 rx_buff_ring_entries;
+		u32 qpl_id = 0;
+
+		if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
+			rx_buff_ring_entries =
+				priv->options_dqo_rda.rx_buff_ring_entries;
+		} else {
+			qpl_id = rx->dqo.qpl->id;
+			rx_buff_ring_entries = priv->rx_desc_cnt;
+		}
+		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 		cmd.create_rx_queue.rx_ring_size =
 			cpu_to_be16(priv->rx_desc_cnt);
 		cmd.create_rx_queue.rx_desc_ring_addr =
@@ -564,7 +607,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_rx_queue.packet_buffer_size =
 			cpu_to_be16(priv->data_buffer_size_dqo);
 		cmd.create_rx_queue.rx_buff_ring_size =
-			cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
+			cpu_to_be16(rx_buff_ring_entries);
 		cmd.create_rx_queue.enable_rsc =
 			!!(priv->dev->features & NETIF_F_LRO);
 	}
@@ -675,9 +718,13 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
 {
 	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+
+	if (priv->queue_format == GVE_DQO_QPL_FORMAT)
+		return 0;
+
 	priv->options_dqo_rda.tx_comp_ring_entries =
 		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
-	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
 	priv->options_dqo_rda.rx_buff_ring_entries =
 		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
 
@@ -687,7 +734,9 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 static void gve_enable_supported_features(struct gve_priv *priv,
 					  u32 supported_features_mask,
 					  const struct gve_device_option_jumbo_frames
-					  *dev_op_jumbo_frames)
+					  *dev_op_jumbo_frames,
+					  const struct gve_device_option_dqo_qpl
+					  *dev_op_dqo_qpl)
 {
 	/* Before control reaches this point, the page-size-capped max MTU from
 	 * the gve_device_descriptor field has already been stored in
@@ -699,6 +748,18 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 			 "JUMBO FRAMES device option enabled.\n");
 		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
 	}
+
+	/* Override pages for qpl for DQO-QPL */
+	if (dev_op_dqo_qpl) {
+		priv->tx_pages_per_qpl =
+			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
+		priv->rx_pages_per_qpl =
+			be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
+		if (priv->tx_pages_per_qpl == 0)
+			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
+		if (priv->rx_pages_per_qpl == 0)
+			priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
+	}
 }
 
 int gve_adminq_describe_device(struct gve_priv *priv)
@@ -707,6 +768,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
 	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
 	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
+	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
 	struct gve_device_descriptor *descriptor;
 	u32 supported_features_mask = 0;
 	union gve_adminq_command cmd;
@@ -733,21 +795,26 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 
 	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
 					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
-					 &dev_op_jumbo_frames);
+					 &dev_op_jumbo_frames,
+					 &dev_op_dqo_qpl);
 	if (err)
 		goto free_device_descriptor;
 
 	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
 	 * is not set to GqiRda, choose the queue format in a priority order:
-	 * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
+	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
 	 */
 	if (dev_op_dqo_rda) {
 		priv->queue_format = GVE_DQO_RDA_FORMAT;
 		dev_info(&priv->pdev->dev,
 			 "Driver is running with DQO RDA queue format.\n");
 		supported_features_mask =
 			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
-	} else if (dev_op_gqi_rda) {
+	} else if (dev_op_dqo_qpl) {
+		priv->queue_format = GVE_DQO_QPL_FORMAT;
+		supported_features_mask =
+			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
+	} else if (dev_op_gqi_rda) {
 		priv->queue_format = GVE_GQI_RDA_FORMAT;
 		dev_info(&priv->pdev->dev,
 			 "Driver is running with GQI RDA queue format.\n");
@@ -798,7 +865,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
 	gve_enable_supported_features(priv, supported_features_mask,
-				      dev_op_jumbo_frames);
+				      dev_op_jumbo_frames, dev_op_dqo_qpl);
 
 free_device_descriptor:
 	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,