
Commit 9c1a59a

baileyforrest authored and davem330 committed
gve: DQO: Add ring allocation and initialization
Allocate the buffer and completion ring structures. Do not populate the rings yet. That will happen in the respective rx and tx datapath follow-on patches.

Signed-off-by: Bailey Forrest <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Reviewed-by: Catherine Sullivan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 5e8c5ad commit 9c1a59a

7 files changed, +420 -13 lines changed

drivers/net/ethernet/google/gve/gve.h

Lines changed: 6 additions & 2 deletions
@@ -204,6 +204,10 @@ struct gve_rx_ring {
 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
+
+	/* head and tail of skb chain for the current packet or NULL if none */
+	struct sk_buff *skb_head;
+	struct sk_buff *skb_tail;
 };
 
 /* A TX desc ring entry */
@@ -816,14 +820,14 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings(struct gve_priv *priv);
+void gve_tx_free_rings_gqi(struct gve_priv *priv);
 __be32 gve_tx_load_event_counter(struct gve_priv *priv,
 				 struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 bool gve_rx_poll(struct gve_notify_block *block, int budget);
 int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings(struct gve_priv *priv);
+void gve_rx_free_rings_gqi(struct gve_priv *priv);
 bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 		       netdev_features_t feat);
 /* Reset */
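Note: the new skb_head/skb_tail fields are only declared by this patch; the DQO rx datapath that uses them arrives in a follow-on patch. Purely as an illustrative sketch (not code from this series, and gve_rx_chain_skb_sketch is a hypothetical helper name), a multi-buffer completion handler could chain per-buffer skbs through these fields roughly like this:

/* Illustrative sketch only: chain the skb for one rx buffer onto the
 * packet currently being assembled on this ring.
 */
static void gve_rx_chain_skb_sketch(struct gve_rx_ring *rx,
				    struct sk_buff *skb)
{
	if (!rx->skb_head) {
		/* First buffer of the packet starts the chain. */
		rx->skb_head = skb;
		rx->skb_tail = skb;
		return;
	}

	/* Later buffers hang off the head skb's frag_list. */
	if (rx->skb_tail == rx->skb_head)
		skb_shinfo(rx->skb_head)->frag_list = skb;
	else
		rx->skb_tail->next = skb;
	rx->skb_tail = skb;
}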

drivers/net/ethernet/google/gve/gve_dqo.h

Lines changed: 18 additions & 0 deletions
@@ -19,6 +19,24 @@
 netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_tx_free_rings_dqo(struct gve_priv *priv);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
+void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+			  struct napi_struct *napi);
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
+void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx);
+
+static inline void
+gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
+			const struct gve_queue_resources *q_resources, u32 val)
+{
+	u64 index;
+
+	index = be32_to_cpu(q_resources->db_index);
+	iowrite32(val, &priv->db_bar2[index]);
+}
 
 static inline void
 gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
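Note: nothing in this patch calls gve_tx_put_doorbell_dqo() yet; the tx datapath comes in a follow-on patch. As a hedged usage sketch only, a transmitter would write its current tail index through the queue's resources, something like the line below, where tx->dqo_tx.tail is an assumed field name rather than one defined in this series:

	/* Sketch only: tell the NIC about newly posted tx descriptors. */
	gve_tx_put_doorbell_dqo(priv, tx->q_resources, tx->dqo_tx.tail);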

drivers/net/ethernet/google/gve/gve_main.c

Lines changed: 44 additions & 9 deletions
@@ -571,13 +571,21 @@ static int gve_create_rings(struct gve_priv *priv)
 	netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
 		  priv->rx_cfg.num_queues);
 
-	/* Rx data ring has been prefilled with packet buffers at queue
-	 * allocation time.
-	 * Write the doorbell to provide descriptor slots and packet buffers
-	 * to the NIC.
-	 */
-	for (i = 0; i < priv->rx_cfg.num_queues; i++)
-		gve_rx_write_doorbell(priv, &priv->rx[i]);
+	if (gve_is_gqi(priv)) {
+		/* Rx data ring has been prefilled with packet buffers at queue
+		 * allocation time.
+		 *
+		 * Write the doorbell to provide descriptor slots and packet
+		 * buffers to the NIC.
+		 */
+		for (i = 0; i < priv->rx_cfg.num_queues; i++)
+			gve_rx_write_doorbell(priv, &priv->rx[i]);
+	} else {
+		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+			/* Post buffers and ring doorbell. */
+			gve_rx_post_buffers_dqo(&priv->rx[i]);
+		}
+	}
 
 	return 0;
 }
@@ -606,6 +614,15 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
 	}
 }
 
+static void gve_tx_free_rings(struct gve_priv *priv)
+{
+	if (gve_is_gqi(priv)) {
+		gve_tx_free_rings_gqi(priv);
+	} else {
+		gve_tx_free_rings_dqo(priv);
+	}
+}
+
 static int gve_alloc_rings(struct gve_priv *priv)
 {
 	int err;
@@ -615,17 +632,26 @@ static int gve_alloc_rings(struct gve_priv *priv)
 			    GFP_KERNEL);
 	if (!priv->tx)
 		return -ENOMEM;
-	err = gve_tx_alloc_rings(priv);
+
+	if (gve_is_gqi(priv))
+		err = gve_tx_alloc_rings(priv);
+	else
+		err = gve_tx_alloc_rings_dqo(priv);
 	if (err)
 		goto free_tx;
+
 	/* Setup rx rings */
 	priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
 			    GFP_KERNEL);
 	if (!priv->rx) {
 		err = -ENOMEM;
 		goto free_tx_queue;
 	}
-	err = gve_rx_alloc_rings(priv);
+
+	if (gve_is_gqi(priv))
+		err = gve_rx_alloc_rings(priv);
+	else
+		err = gve_rx_alloc_rings_dqo(priv);
 	if (err)
 		goto free_rx;
 
@@ -670,6 +696,14 @@ static int gve_destroy_rings(struct gve_priv *priv)
 	return 0;
 }
 
+static inline void gve_rx_free_rings(struct gve_priv *priv)
+{
+	if (gve_is_gqi(priv))
+		gve_rx_free_rings_gqi(priv);
+	else
+		gve_rx_free_rings_dqo(priv);
+}
+
 static void gve_free_rings(struct gve_priv *priv)
 {
 	int ntfy_idx;
@@ -869,6 +903,7 @@ static int gve_open(struct net_device *dev)
 	err = gve_alloc_qpls(priv);
 	if (err)
 		return err;
+
 	err = gve_alloc_rings(priv);
 	if (err)
 		goto free_qpls;
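Note: the alloc/free paths above branch on gve_is_gqi() so the existing GQI rings and the new DQO rings share a single entry point. gve_is_gqi() is not part of this diff; it was added by an earlier queue-format patch in this series and, roughly, just tests the negotiated format. The shape shown here is an approximation for context, not code added by this commit:

/* Approximate shape of the helper this diff relies on (defined in gve.h
 * by an earlier patch in the series); shown only for reference.
 */
static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}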

drivers/net/ethernet/google/gve/gve_rx.c

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ int gve_rx_alloc_rings(struct gve_priv *priv)
 	return err;
 }
 
-void gve_rx_free_rings(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv)
 {
 	int i;
 

drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 157 additions & 0 deletions
@@ -16,6 +16,163 @@
 #include <net/ipv6.h>
 #include <net/tcp.h>
 
+static void gve_free_page_dqo(struct gve_priv *priv,
+			      struct gve_rx_buf_state_dqo *bs)
+{
+}
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	struct device *hdev = &priv->pdev->dev;
+	size_t completion_queue_slots;
+	size_t buffer_queue_slots;
+	size_t size;
+	int i;
+
+	completion_queue_slots = rx->dqo.complq.mask + 1;
+	buffer_queue_slots = rx->dqo.bufq.mask + 1;
+
+	gve_rx_remove_from_block(priv, idx);
+
+	if (rx->q_resources) {
+		dma_free_coherent(hdev, sizeof(*rx->q_resources),
+				  rx->q_resources, rx->q_resources_bus);
+		rx->q_resources = NULL;
+	}
+
+	for (i = 0; i < rx->dqo.num_buf_states; i++) {
+		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+		if (bs->page_info.page)
+			gve_free_page_dqo(priv, bs);
+	}
+
+	if (rx->dqo.bufq.desc_ring) {
+		size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+		dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring,
+				  rx->dqo.bufq.bus);
+		rx->dqo.bufq.desc_ring = NULL;
+	}
+
+	if (rx->dqo.complq.desc_ring) {
+		size = sizeof(rx->dqo.complq.desc_ring[0]) *
+			completion_queue_slots;
+		dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring,
+				  rx->dqo.complq.bus);
+		rx->dqo.complq.desc_ring = NULL;
+	}
+
+	kvfree(rx->dqo.buf_states);
+	rx->dqo.buf_states = NULL;
+
+	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+{
+	struct gve_rx_ring *rx = &priv->rx[idx];
+	struct device *hdev = &priv->pdev->dev;
+	size_t size;
+	int i;
+
+	const u32 buffer_queue_slots =
+		priv->options_dqo_rda.rx_buff_ring_entries;
+	const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
+
+	memset(rx, 0, sizeof(*rx));
+	rx->gve = priv;
+	rx->q_num = idx;
+	rx->dqo.bufq.mask = buffer_queue_slots - 1;
+	rx->dqo.complq.num_free_slots = completion_queue_slots;
+	rx->dqo.complq.mask = completion_queue_slots - 1;
+	rx->skb_head = NULL;
+	rx->skb_tail = NULL;
+
+	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
+				      sizeof(rx->dqo.buf_states[0]),
+				      GFP_KERNEL);
+	if (!rx->dqo.buf_states)
+		return -ENOMEM;
+
+	/* Set up linked list of buffer IDs */
+	for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+		rx->dqo.buf_states[i].next = i + 1;
+
+	rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+	rx->dqo.recycled_buf_states.head = -1;
+	rx->dqo.recycled_buf_states.tail = -1;
+	rx->dqo.used_buf_states.head = -1;
+	rx->dqo.used_buf_states.tail = -1;
+
+	/* Allocate RX completion queue */
+	size = sizeof(rx->dqo.complq.desc_ring[0]) *
+		completion_queue_slots;
+	rx->dqo.complq.desc_ring =
+		dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL);
+	if (!rx->dqo.complq.desc_ring)
+		goto err;
+
+	/* Allocate RX buffer queue */
+	size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
+	rx->dqo.bufq.desc_ring =
+		dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL);
+	if (!rx->dqo.bufq.desc_ring)
+		goto err;
+
+	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
+					     &rx->q_resources_bus, GFP_KERNEL);
+	if (!rx->q_resources)
+		goto err;
+
+	gve_rx_add_to_block(priv, idx);
+
+	return 0;
+
+err:
+	gve_rx_free_ring_dqo(priv, idx);
+	return -ENOMEM;
+}
+
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+{
+	int err = 0;
+	int i;
+
+	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+		err = gve_rx_alloc_ring_dqo(priv, i);
+		if (err) {
+			netif_err(priv, drv, priv->dev,
+				  "Failed to alloc rx ring=%d: err=%d\n",
+				  i, err);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	for (i--; i >= 0; i--)
+		gve_rx_free_ring_dqo(priv, i);
+
+	return err;
+}
+
+void gve_rx_free_rings_dqo(struct gve_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->rx_cfg.num_queues; i++)
+		gve_rx_free_ring_dqo(priv, i);
+}
+
+void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
+{
+}
+
 int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
 {
 	u32 work_done = 0;
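Note: gve_rx_alloc_ring_dqo() threads the buf_states array into index-linked lists (each entry's next field is an array index, with -1 terminating a list), so recycled and used buffer states can be tracked without any further allocation. A hedged sketch of how a consumer might pop an entry follows; gve_index_list is assumed here to be a small head/tail struct of s16 indices, and the helper name is hypothetical:

/* Sketch only: pop one buffer-state index off an index-linked list. */
static s16 gve_pop_buf_state_sketch(struct gve_rx_ring *rx,
				    struct gve_index_list *list)
{
	s16 buffer_id = list->head;

	if (buffer_id == -1)
		return -1;

	list->head = rx->dqo.buf_states[buffer_id].next;
	if (list->head == -1)
		list->tail = -1;

	return buffer_id;
}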

drivers/net/ethernet/google/gve/gve_tx.c

Lines changed: 1 addition & 1 deletion
@@ -256,7 +256,7 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
 	return err;
 }
 
-void gve_tx_free_rings(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv)
 {
 	int i;
 
