
Commit 9e262ef

rushil-google authored and vijay-suman committed
gve: RX path for DQO-QPL
The RX path allocates the QPL page pool at queue creation, and tries to reuse
these pages through page recycling. This patch ensures that on refill no
non-QPL pages are posted to the device.

When the driver is running low on free buffers, an ondemand allocation step
kicks in that allocates a non-qpl page for SKB business to free up the QPL
page in use.

gve_try_recycle_buf was moved to gve_rx_append_frags so that the driver does
not attempt to mark the buffer as used if a non-qpl page was allocated
ondemand.

Signed-off-by: Rushil Gupta <[email protected]>
Reviewed-by: Willem de Bruijn <[email protected]>
Signed-off-by: Praveen Kaligineedi <[email protected]>
Signed-off-by: Bailey Forrest <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
(cherry picked from commit e7075ab)

Orabug: 37356729

Signed-off-by: Yifei Liu <[email protected]>
Reviewed-by: Saeed Mirzamohammadi <[email protected]>

Conflict: drivers/net/ethernet/google/gve/gve_rx_dqo.c
gve_assign_rx_qpl has only one parameter in v5.15; delete the second parameter
to match the google 5.15 branch version.

Signed-off-by: Saeed Mirzamohammadi <[email protected]>
Signed-off-by: Vijayendra Suman <[email protected]>
1 parent cccfc84 commit 9e262ef
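
The copy-on-demand behaviour described in the commit message can be modelled outside the driver. What follows is a minimal userspace C sketch, not driver code: the pool, threshold, and names (pool_buf, rx_consume, POOL_SIZE, ONDEMAND_THRESHOLD) are invented for illustration and merely stand in for the QPL pages, the driver's ondemand threshold, and used_buf_states_cnt. When nearly all pooled buffers are still held downstream, the payload is copied into a freshly allocated buffer so the pooled buffer can be recycled immediately.

/* Minimal model of the QPL copy-on-demand idea: a fixed pool of buffers
 * stands in for the registered QPL pages. When too many pooled buffers are
 * still in use, hand the payload to the consumer in a freshly allocated
 * buffer instead, and recycle the pooled buffer right away.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE           8   /* stand-in for the fixed QPL page count */
#define ONDEMAND_THRESHOLD  2   /* stand-in for the driver's ondemand margin */
#define BUF_LEN             2048

struct pool_buf {
	char data[BUF_LEN];
	bool in_use;            /* still referenced by the consumer */
};

static struct pool_buf pool[POOL_SIZE];
static int used_cnt;            /* mirrors used_buf_states_cnt */

/* Low on free pooled buffers? Then copy instead of handing out the buffer. */
static bool should_copy_ondemand(void)
{
	return used_cnt >= POOL_SIZE - ONDEMAND_THRESHOLD;
}

/* Deliver a received payload that currently sits in pooled buffer @b. */
static char *rx_consume(struct pool_buf *b, size_t len)
{
	if (should_copy_ondemand()) {
		/* Copy path: the pooled buffer is recycled immediately. */
		char *copy = malloc(len);

		if (!copy)
			return NULL;
		memcpy(copy, b->data, len);
		return copy;
	}
	/* Zero-copy path: the pooled buffer stays "used" until freed later. */
	b->in_use = true;
	used_cnt++;
	return b->data;
}

int main(void)
{
	/* Pretend 7 of 8 pooled buffers are already held downstream. */
	used_cnt = 7;
	strcpy(pool[7].data, "payload");

	char *p = rx_consume(&pool[7], sizeof("payload"));

	if (!p)
		return 1;
	printf("delivered via %s: %s\n",
	       should_copy_ondemand() ? "copy" : "pool buffer", p);
	if (p != pool[7].data)
		free(p);
	return 0;
}

The trade-off mirrors the patch: an extra allocation and copy in the slow path buys back a registered page that would otherwise stay pinned until the stack releases it.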

File tree

2 files changed: +114 -18 lines


drivers/net/ethernet/google/gve/gve.h

Lines changed: 6 additions & 0 deletions
@@ -238,6 +238,12 @@ struct gve_rx_ring {
 
 			/* qpl assigned to this queue */
 			struct gve_queue_page_list *qpl;
+
+			/* index into queue page list */
+			u32 next_qpl_page_idx;
+
+			/* track number of used buffers */
+			u16 used_buf_states_cnt;
 		} dqo;
 	};
 
drivers/net/ethernet/google/gve/gve_rx_dqo.c

Lines changed: 108 additions & 18 deletions
@@ -22,11 +22,13 @@ static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
 }
 
 static void gve_free_page_dqo(struct gve_priv *priv,
-			      struct gve_rx_buf_state_dqo *bs)
+			      struct gve_rx_buf_state_dqo *bs,
+			      bool free_page)
 {
 	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
-	gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
-		      DMA_FROM_DEVICE);
+	if (free_page)
+		gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+			      DMA_FROM_DEVICE);
 	bs->page_info.page = NULL;
 }
 
@@ -130,12 +132,20 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 	 */
 	for (i = 0; i < 5; i++) {
 		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
-		if (gve_buf_ref_cnt(buf_state) == 0)
+		if (gve_buf_ref_cnt(buf_state) == 0) {
+			rx->dqo.used_buf_states_cnt--;
 			return buf_state;
+		}
 
 		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	}
 
+	/* For QPL, we cannot allocate any new buffers and must
+	 * wait for the existing ones to be available.
+	 */
+	if (rx->dqo.qpl)
+		return NULL;
+
 	/* If there are no free buf states discard an entry from
 	 * `used_buf_states` so it can be used.
 	 */
@@ -144,23 +154,39 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 		if (gve_buf_ref_cnt(buf_state) == 0)
 			return buf_state;
 
-		gve_free_page_dqo(rx->gve, buf_state);
+		gve_free_page_dqo(rx->gve, buf_state, true);
 		gve_free_buf_state(rx, buf_state);
 	}
 
 	return NULL;
 }
 
-static int gve_alloc_page_dqo(struct gve_priv *priv,
+static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 			      struct gve_rx_buf_state_dqo *buf_state)
 {
-	int err;
+	struct gve_priv *priv = rx->gve;
+	u32 idx;
 
-	err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
-			     &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
-	if (err)
-		return err;
+	if (!rx->dqo.qpl) {
+		int err;
 
+		err = gve_alloc_page(priv, &priv->pdev->dev,
+				     &buf_state->page_info.page,
+				     &buf_state->addr,
+				     DMA_FROM_DEVICE, GFP_ATOMIC);
+		if (err)
+			return err;
+	} else {
+		idx = rx->dqo.next_qpl_page_idx;
+		if (idx >= priv->rx_pages_per_qpl) {
+			net_err_ratelimited("%s: Out of QPL pages\n",
+					    priv->dev->name);
+			return -ENOMEM;
+		}
+		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+		buf_state->addr = rx->dqo.qpl->page_buses[idx];
+		rx->dqo.next_qpl_page_idx++;
+	}
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
@@ -195,9 +221,13 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
 
 	for (i = 0; i < rx->dqo.num_buf_states; i++) {
 		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
-
+		/* Only free page for RDA. QPL pages are freed in gve_main. */
 		if (bs->page_info.page)
-			gve_free_page_dqo(priv, bs);
+			gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+	}
+	if (rx->dqo.qpl) {
+		gve_unassign_qpl(priv, rx->dqo.qpl->id);
+		rx->dqo.qpl = NULL;
 	}
 
 	if (rx->dqo.bufq.desc_ring) {
@@ -229,7 +259,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	int i;
 
 	const u32 buffer_queue_slots =
-		priv->options_dqo_rda.rx_buff_ring_entries;
+		priv->queue_format == GVE_DQO_RDA_FORMAT ?
+		priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
 	const u32 completion_queue_slots = priv->rx_desc_cnt;
 
 	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -243,7 +274,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	rx->ctx.skb_head = NULL;
 	rx->ctx.skb_tail = NULL;
 
-	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+	rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
+		priv->rx_pages_per_qpl;
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
 				      sizeof(rx->dqo.buf_states[0]),
 				      GFP_KERNEL);
@@ -275,6 +308,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	if (!rx->dqo.bufq.desc_ring)
 		goto err;
 
+	if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
+		rx->dqo.qpl = gve_assign_rx_qpl(priv);
+		if (!rx->dqo.qpl)
+			goto err;
+		rx->dqo.next_qpl_page_idx = 0;
+	}
+
 	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
 					     &rx->q_resources_bus, GFP_KERNEL);
 	if (!rx->q_resources)
@@ -352,7 +392,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
 		if (unlikely(!buf_state))
 			break;
 
-		if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+		if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
 			u64_stats_update_begin(&rx->statss);
 			rx->rx_buf_alloc_fail++;
 			u64_stats_update_end(&rx->statss);
@@ -415,6 +455,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 
 mark_used:
 	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+	rx->dqo.used_buf_states_cnt++;
 }
 
 static void gve_rx_skb_csum(struct sk_buff *skb,
@@ -477,6 +518,43 @@ static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
 	rx->ctx.skb_tail = NULL;
 }
 
+static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx)
+{
+	if (!rx->dqo.qpl)
+		return false;
+	if (rx->dqo.used_buf_states_cnt <
+	    (rx->dqo.num_buf_states -
+	     GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD))
+		return false;
+	return true;
+}
+
+static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
+				struct gve_rx_buf_state_dqo *buf_state,
+				u16 buf_len)
+{
+	struct page *page = alloc_page(GFP_ATOMIC);
+	int num_frags;
+
+	if (!page)
+		return -ENOMEM;
+
+	memcpy(page_address(page),
+	       buf_state->page_info.page_address +
+	       buf_state->page_info.page_offset,
+	       buf_len);
+	num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
+	skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page,
+			0, buf_len, PAGE_SIZE);
+
+	u64_stats_update_begin(&rx->statss);
+	rx->rx_frag_alloc_cnt++;
+	u64_stats_update_end(&rx->statss);
+	/* Return unused buffer. */
+	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+	return 0;
+}
+
 /* Chains multi skbs for single rx packet.
  * Returns 0 if buffer is appended, -1 otherwise.
  */
@@ -504,12 +582,20 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
 	}
 
+	/* Trigger ondemand page allocation if we are running low on buffers */
+	if (gve_rx_should_trigger_copy_ondemand(rx))
+		return gve_rx_copy_ondemand(rx, buf_state, buf_len);
+
 	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 			buf_state->page_info.page,
 			buf_state->page_info.page_offset,
 			buf_len, priv->data_buffer_size_dqo);
 	gve_dec_pagecnt_bias(&buf_state->page_info);
 
+	/* Advances buffer page-offset if page is partially used.
+	 * Marks buffer as used if page is full.
+	 */
+	gve_try_recycle_buf(priv, rx, buf_state);
 	return 0;
 }
 
@@ -563,8 +649,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 						 priv)) != 0) {
 			goto error;
 		}
-
-		gve_try_recycle_buf(priv, rx, buf_state);
 		return 0;
 	}
 
@@ -590,6 +674,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		goto error;
 	rx->ctx.skb_tail = rx->ctx.skb_head;
 
+	if (gve_rx_should_trigger_copy_ondemand(rx)) {
+		if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0)
+			goto error;
+		return 0;
+	}
+
 	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
 			buf_state->page_info.page_offset, buf_len,
 			priv->data_buffer_size_dqo);
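
On the refill side of the diff above, the reworked gve_alloc_page_dqo either takes the next page from the pre-registered QPL array or, in RDA mode, allocates a fresh page. The sketch below restates that choice in a self-contained userspace form under assumed names (page_list, get_next_buf_page, backing); it is an illustration of the branching, not the driver's implementation.

/* Illustrative refill choice: QPL-style mode indexes into a fixed,
 * pre-registered page array and reports -ENOMEM when it runs out;
 * RDA-style mode allocates a fresh buffer on every refill.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct page_list {
	void **pages;            /* pre-registered pages (QPL stand-in) */
	unsigned int num_pages;
	unsigned int next_idx;   /* mirrors next_qpl_page_idx */
};

/* Return the buffer to post, or NULL with *err set on failure. */
static void *get_next_buf_page(struct page_list *qpl, size_t buf_size, int *err)
{
	*err = 0;
	if (!qpl) {
		/* RDA-style path: allocate a fresh buffer each time. */
		void *page = malloc(buf_size);

		if (!page)
			*err = -ENOMEM;
		return page;
	}
	/* QPL-style path: the pool is fixed; never allocate past the end. */
	if (qpl->next_idx >= qpl->num_pages) {
		*err = -ENOMEM;
		return NULL;
	}
	return qpl->pages[qpl->next_idx++];
}

int main(void)
{
	void *backing[2] = { malloc(4096), malloc(4096) };
	struct page_list qpl = { .pages = backing, .num_pages = 2 };
	int err;

	/* Two posts succeed from the registered pool; the third runs dry. */
	for (int i = 0; i < 3; i++)
		printf("post %d: %s\n", i,
		       get_next_buf_page(&qpl, 4096, &err) ?
		       "ok" : "out of QPL pages");

	free(backing[0]);
	free(backing[1]);
	return 0;
}

The property carried over from the patch is that the QPL-style branch never allocates: once next_idx reaches num_pages, refill simply fails until existing buffers are recycled.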
