
Commit f13697c

shailend-g authored and kuba-moo committed
gve: Switch to config-aware queue allocation
The new config-aware functions will help achieve the goal of being able
to allocate resources for new queues while there already are active
queues serving traffic. These new functions work off of arbitrary queue
allocation configs rather than just the currently active config in
priv, and they return the newly allocated resources instead of writing
them into priv.

Signed-off-by: Shailend Chand <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Reviewed-by: Jeroen de Borst <[email protected]>
Reviewed-by: Simon Horman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 1dfc2e4 commit f13697c
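
The heart of the change is a calling-convention shift: allocation functions take an explicit config struct and hand resources back through it, rather than reading from and writing into priv directly. A minimal sketch of the intended call pattern, based only on the signatures visible in the diffs below; the wrapper function and the "install into priv" step are illustrative assumptions, not code from this series:

```c
/* Illustrative sketch of the config-aware pattern; not driver code.
 * Only gve_tx_alloc_rings_gqi() and gve_tx_start_ring_gqi() come from
 * this commit; the surrounding flow is assumed.
 */
static int gve_example_bring_up_new_tx(struct gve_priv *priv,
				       struct gve_tx_alloc_rings_cfg *cfg)
{
	int err;

	/* Allocate against an arbitrary config: nothing in priv is
	 * modified yet, so the currently active queues keep serving
	 * traffic while this runs.
	 */
	err = gve_tx_alloc_rings_gqi(priv, cfg);
	if (err)
		return err;

	/* Only after allocation succeeds would the caller install the
	 * resources returned through *cfg into priv and start each
	 * ring, e.g. with gve_tx_start_ring_gqi(priv, idx).
	 */
	return 0;
}
```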

File tree

9 files changed: +745, -425 lines

drivers/net/ethernet/google/gve/gve.h

Lines changed: 50 additions & 42 deletions
@@ -966,14 +966,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
 	       priv->queue_format == GVE_DQO_QPL_FORMAT;
 }
 
-/* Returns the number of tx queue page lists
- */
-static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+/* Returns the number of tx queue page lists */
+static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
+				  int num_xdp_queues,
+				  bool is_qpl)
 {
-	if (!gve_is_qpl(priv))
+	if (!is_qpl)
 		return 0;
-
-	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+	return tx_cfg->num_queues + num_xdp_queues;
 }
 
 /* Returns the number of XDP tx queue page lists
@@ -986,14 +986,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
 	return priv->num_xdp_queues;
 }
 
-/* Returns the number of rx queue page lists
- */
-static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+/* Returns the number of rx queue page lists */
+static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+				  bool is_qpl)
 {
-	if (!gve_is_qpl(priv))
+	if (!is_qpl)
 		return 0;
-
-	return priv->rx_cfg.num_queues;
+	return rx_cfg->num_queues;
 }
 
 static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
@@ -1006,59 +1005,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
 	return priv->tx_cfg.max_queues + rx_qid;
 }
 
+/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
+static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+{
+	return tx_cfg->max_queues + rx_qid;
+}
+
 static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
 {
 	return gve_tx_qpl_id(priv, 0);
 }
 
-static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+/* Returns the index into priv->qpls where the first rx queue's QPL resides */
+static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
 {
-	return gve_rx_qpl_id(priv, 0);
+	return gve_get_rx_qpl_id(tx_cfg, 0);
 }
 
-/* Returns a pointer to the next available tx qpl in the list of qpls
- */
+/* Returns a pointer to the next available tx qpl in the list of qpls */
 static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
+					      int tx_qid)
 {
-	int id = gve_tx_qpl_id(priv, tx_qid);
-
 	/* QPL already in use */
-	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+	if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
 		return NULL;
-
-	set_bit(id, priv->qpl_cfg.qpl_id_map);
-	return &priv->qpls[id];
+	set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
+	return &cfg->qpls[tx_qid];
 }
 
-/* Returns a pointer to the next available rx qpl in the list of qpls
- */
+/* Returns a pointer to the next available rx qpl in the list of qpls */
 static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
+					      int rx_qid)
 {
-	int id = gve_rx_qpl_id(priv, rx_qid);
-
+	int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
 	/* QPL already in use */
-	if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+	if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
 		return NULL;
-
-	set_bit(id, priv->qpl_cfg.qpl_id_map);
-	return &priv->qpls[id];
+	set_bit(id, cfg->qpl_cfg->qpl_id_map);
+	return &cfg->qpls[id];
}
 
-/* Unassigns the qpl with the given id
- */
-static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+/* Unassigns the qpl with the given id */
+static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
 {
-	clear_bit(id, priv->qpl_cfg.qpl_id_map);
+	clear_bit(id, qpl_cfg->qpl_id_map);
 }
 
-/* Returns the correct dma direction for tx and rx qpls
- */
+/* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
 						      int id)
 {
-	if (id < gve_rx_start_qpl_id(priv))
+	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
 		return DMA_TO_DEVICE;
 	else
 		return DMA_FROM_DEVICE;
@@ -1103,8 +1102,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
 void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 bool gve_xdp_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
 u32 gve_tx_load_event_counter(struct gve_priv *priv,
 			      struct gve_tx_ring *tx);
 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
@@ -1113,7 +1116,12 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 int gve_rx_poll(struct gve_notify_block *block, int budget);
 bool gve_rx_work_pending(struct gve_rx_ring *rx);
 int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings_gqi(struct gve_priv *priv);
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
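
For illustration, here is how the reworked QPL helpers above compose: a tx QPL id is now just the queue id within the cfg's bitmap, while rx ids are offset past the tx config's max_queues via gve_get_rx_qpl_id(). A hedged sketch using only signatures shown in this hunk; the wrapper function itself is hypothetical:

```c
/* Sketch only: demonstrates the bitmap bookkeeping of the new helpers.
 * gve_assign_*_qpl() return NULL when the id's bit is already set.
 */
static int gve_example_reserve_qpls(struct gve_tx_alloc_rings_cfg *tx_cfg,
				    struct gve_rx_alloc_rings_cfg *rx_cfg)
{
	struct gve_queue_page_list *tx_qpl, *rx_qpl;

	tx_qpl = gve_assign_tx_qpl(tx_cfg, 0);	/* sets bit 0 of qpl_id_map */
	if (!tx_qpl)
		return -EBUSY;			/* QPL already in use */

	/* rx queue 0 maps to bit qcfg_tx->max_queues + 0 */
	rx_qpl = gve_assign_rx_qpl(rx_cfg, 0);
	if (!rx_qpl) {
		gve_unassign_qpl(tx_cfg->qpl_cfg, 0);	/* roll back tx bit */
		return -EBUSY;
	}
	return 0;
}
```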

drivers/net/ethernet/google/gve/gve_dqo.h

Lines changed: 12 additions & 4 deletions
@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
 					 netdev_features_t features);
 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_tx_free_rings_dqo(struct gve_priv *priv);
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+			   struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+			   struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
 int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 			  struct napi_struct *napi);
 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
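
The DQO header gets the same split as GQI: allocation works against a cfg, with separate per-queue start/stop entry points. A sketch of how the pieces would plausibly fit together; the install-into-priv step is elided and the overall flow is an assumption, not code from this series:

```c
/* Illustrative only: alloc-then-start flow implied by the new DQO API. */
static int gve_example_up_rx_dqo(struct gve_priv *priv,
				 struct gve_rx_alloc_rings_cfg *cfg,
				 int num_rings)
{
	int err, i;

	err = gve_rx_alloc_rings_dqo(priv, cfg);	/* no priv writes */
	if (err)
		return err;

	/* ... caller installs cfg's returned rings into priv here ... */

	for (i = 0; i < num_rings; i++)
		gve_rx_start_ring_dqo(priv, i);		/* begin serving */
	return 0;
}
```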
