@@ -22,11 +22,13 @@ static int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
 }
 
 static void gve_free_page_dqo(struct gve_priv *priv,
-			      struct gve_rx_buf_state_dqo *bs)
+			      struct gve_rx_buf_state_dqo *bs,
+			      bool free_page)
 {
 	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
-	gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
-		      DMA_FROM_DEVICE);
+	if (free_page)
+		gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
+			      DMA_FROM_DEVICE);
 	bs->page_info.page = NULL;
 }
 
@@ -130,12 +132,20 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 	 */
 	for (i = 0; i < 5; i++) {
 		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
-		if (gve_buf_ref_cnt(buf_state) == 0)
+		if (gve_buf_ref_cnt(buf_state) == 0) {
+			rx->dqo.used_buf_states_cnt--;
 			return buf_state;
+		}
 
 		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	}
 
+	/* For QPL, we cannot allocate any new buffers and must
+	 * wait for the existing ones to be available.
+	 */
+	if (rx->dqo.qpl)
+		return NULL;
+
 	/* If there are no free buf states discard an entry from
 	 * `used_buf_states` so it can be used.
 	 */
@@ -144,23 +154,39 @@ gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 		if (gve_buf_ref_cnt(buf_state) == 0)
 			return buf_state;
 
-		gve_free_page_dqo(rx->gve, buf_state);
+		gve_free_page_dqo(rx->gve, buf_state, true);
 		gve_free_buf_state(rx, buf_state);
 	}
 
 	return NULL;
 }
 
-static int gve_alloc_page_dqo(struct gve_priv *priv,
+static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 			      struct gve_rx_buf_state_dqo *buf_state)
 {
-	int err;
+	struct gve_priv *priv = rx->gve;
+	u32 idx;
 
-	err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
-			     &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
-	if (err)
-		return err;
+	if (!rx->dqo.qpl) {
+		int err;
+
+		err = gve_alloc_page(priv, &priv->pdev->dev,
+				     &buf_state->page_info.page,
+				     &buf_state->addr,
+				     DMA_FROM_DEVICE, GFP_ATOMIC);
+		if (err)
+			return err;
+	} else {
+		idx = rx->dqo.next_qpl_page_idx;
+		if (idx >= priv->rx_pages_per_qpl) {
+			net_err_ratelimited("%s: Out of QPL pages\n",
+					    priv->dev->name);
+			return -ENOMEM;
+		}
+		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+		buf_state->addr = rx->dqo.qpl->page_buses[idx];
+		rx->dqo.next_qpl_page_idx++;
+	}
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
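
Aside: the reworked gve_alloc_page_dqo() above keeps the old RDA behaviour (allocate and DMA-map a fresh page per buffer) and adds a QPL branch that simply hands out the next page of the pre-registered queue page list, failing with -ENOMEM once the list is exhausted. The sketch below restates that split as a tiny standalone C program; buf_pool, use_pool and get_rx_buffer() are made-up names standing in for rx->dqo state and the driver helpers, not anything from the gve sources.

#include <stdio.h>
#include <stdlib.h>

#define POOL_PAGES 4	/* stand-in for priv->rx_pages_per_qpl */

struct buf_pool {
	void *pages[POOL_PAGES];	/* pre-registered pages (QPL-like case) */
	unsigned int next_idx;		/* stand-in for rx->dqo.next_qpl_page_idx */
	int use_pool;			/* 0: allocate per buffer (RDA-like), 1: fixed pool (QPL-like) */
};

/* Hand out a buffer: either allocate a fresh page or take the next
 * pre-registered one; returns NULL when the fixed pool is exhausted,
 * mirroring the -ENOMEM path in the hunk above.
 */
static void *get_rx_buffer(struct buf_pool *p)
{
	if (!p->use_pool)
		return malloc(4096);

	if (p->next_idx >= POOL_PAGES) {
		fprintf(stderr, "out of pool pages\n");
		return NULL;
	}
	return p->pages[p->next_idx++];
}

int main(void)
{
	struct buf_pool p = { .use_pool = 1 };
	int i;

	for (i = 0; i < POOL_PAGES; i++)
		p.pages[i] = malloc(4096);	/* the "registered" pages */

	for (i = 0; i < POOL_PAGES + 1; i++)
		printf("buffer %d -> %p\n", i, get_rx_buffer(&p));

	for (i = 0; i < POOL_PAGES; i++)
		free(p.pages[i]);
	return 0;
}
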
@@ -195,9 +221,13 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
 
 	for (i = 0; i < rx->dqo.num_buf_states; i++) {
 		struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
-
+		/* Only free page for RDA. QPL pages are freed in gve_main. */
 		if (bs->page_info.page)
-			gve_free_page_dqo(priv, bs);
+			gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+	}
+	if (rx->dqo.qpl) {
+		gve_unassign_qpl(priv, rx->dqo.qpl->id);
+		rx->dqo.qpl = NULL;
 	}
 
 	if (rx->dqo.bufq.desc_ring) {
@@ -229,7 +259,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	int i;
 
 	const u32 buffer_queue_slots =
-		priv->options_dqo_rda.rx_buff_ring_entries;
+		priv->queue_format == GVE_DQO_RDA_FORMAT ?
+		priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
 	const u32 completion_queue_slots = priv->rx_desc_cnt;
 
 	netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -243,7 +274,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	rx->ctx.skb_head = NULL;
 	rx->ctx.skb_tail = NULL;
 
-	rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
+	rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+		min_t(s16, S16_MAX, buffer_queue_slots * 4) :
+		priv->rx_pages_per_qpl;
 	rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
 				      sizeof(rx->dqo.buf_states[0]),
 				      GFP_KERNEL);
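
Aside: the two formats also size the buffer-state table differently. RDA allows up to four buffer states per buffer-queue slot but clamps the count to S16_MAX because buffer-state indices are 16-bit, while QPL gets exactly one state per registered page (rx_pages_per_qpl). A quick illustration of the clamp with made-up slot counts:

#include <stdio.h>
#include <limits.h>

/* Same shape as min_t(s16, S16_MAX, buffer_queue_slots * 4):
 * buffer-state indices are s16, so the table tops out at 32767 entries.
 */
static short rda_num_buf_states(unsigned int buffer_queue_slots)
{
	unsigned int want = buffer_queue_slots * 4;

	return want > SHRT_MAX ? SHRT_MAX : (short)want;
}

int main(void)
{
	printf("%d\n", rda_num_buf_states(1024));	/* 4096 */
	printf("%d\n", rda_num_buf_states(8192));	/* clamped to 32767 */
	return 0;
}
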
@@ -275,6 +308,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
 	if (!rx->dqo.bufq.desc_ring)
 		goto err;
 
+	if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
+		rx->dqo.qpl = gve_assign_rx_qpl(priv);
+		if (!rx->dqo.qpl)
+			goto err;
+		rx->dqo.next_qpl_page_idx = 0;
+	}
+
 	rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources),
 					     &rx->q_resources_bus, GFP_KERNEL);
 	if (!rx->q_resources)
@@ -352,7 +392,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
 		if (unlikely(!buf_state))
 			break;
 
-		if (unlikely(gve_alloc_page_dqo(priv, buf_state))) {
+		if (unlikely(gve_alloc_page_dqo(rx, buf_state))) {
 			u64_stats_update_begin(&rx->statss);
 			rx->rx_buf_alloc_fail++;
 			u64_stats_update_end(&rx->statss);
@@ -415,6 +455,7 @@ static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 
 mark_used:
 	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
+	rx->dqo.used_buf_states_cnt++;
 }
 
 static void gve_rx_skb_csum(struct sk_buff *skb,
@@ -477,6 +518,43 @@ static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
 	rx->ctx.skb_tail = NULL;
 }
 
+static bool gve_rx_should_trigger_copy_ondemand(struct gve_rx_ring *rx)
+{
+	if (!rx->dqo.qpl)
+		return false;
+	if (rx->dqo.used_buf_states_cnt <
+	    (rx->dqo.num_buf_states -
+	     GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD))
+		return false;
+	return true;
+}
+
+static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
+				struct gve_rx_buf_state_dqo *buf_state,
+				u16 buf_len)
+{
+	struct page *page = alloc_page(GFP_ATOMIC);
+	int num_frags;
+
+	if (!page)
+		return -ENOMEM;
+
+	memcpy(page_address(page),
+	       buf_state->page_info.page_address +
+	       buf_state->page_info.page_offset,
+	       buf_len);
+	num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
+	skb_add_rx_frag(rx->ctx.skb_tail, num_frags, page,
+			0, buf_len, PAGE_SIZE);
+
+	u64_stats_update_begin(&rx->statss);
+	rx->rx_frag_alloc_cnt++;
+	u64_stats_update_end(&rx->statss);
+	/* Return unused buffer. */
+	gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
+	return 0;
+}
+
 /* Chains multi skbs for single rx packet.
  * Returns 0 if buffer is appended, -1 otherwise.
  */
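
Aside: gve_rx_should_trigger_copy_ondemand() encodes the low-buffer heuristic: once the count of buffer states parked on the used list climbs to within GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD of the whole table, the RX path stops attaching QPL pages to skbs and instead copies the payload into a freshly allocated page so the QPL page can be recycled immediately. A minimal restatement of that predicate with placeholder values (the real threshold constant lives in gve.h):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values; the driver reads these from rx->dqo.num_buf_states
 * and GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD in gve.h.
 */
#define NUM_BUF_STATES	1024
#define ONDEMAND_THRESH	96

/* Mirrors the predicate above (the !rx->dqo.qpl early-out is omitted):
 * trigger the copy once all but ONDEMAND_THRESH buffer states sit on the
 * used list, i.e. nearly every QPL page is still held by the stack.
 */
static bool should_copy_ondemand(unsigned int used_buf_states_cnt)
{
	return used_buf_states_cnt >= NUM_BUF_STATES - ONDEMAND_THRESH;
}

int main(void)
{
	printf("500 used -> %s\n", should_copy_ondemand(500) ? "copy" : "attach page");
	printf("950 used -> %s\n", should_copy_ondemand(950) ? "copy" : "attach page");
	return 0;
}
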
@@ -504,12 +582,20 @@ static int gve_rx_append_frags(struct napi_struct *napi,
 		rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
 	}
 
+	/* Trigger ondemand page allocation if we are running low on buffers */
+	if (gve_rx_should_trigger_copy_ondemand(rx))
+		return gve_rx_copy_ondemand(rx, buf_state, buf_len);
+
 	skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
 			buf_state->page_info.page,
 			buf_state->page_info.page_offset,
 			buf_len, priv->data_buffer_size_dqo);
 	gve_dec_pagecnt_bias(&buf_state->page_info);
 
+	/* Advances buffer page-offset if page is partially used.
+	 * Marks buffer as used if page is full.
+	 */
+	gve_try_recycle_buf(priv, rx, buf_state);
 	return 0;
 }
 
@@ -563,8 +649,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 					 priv)) != 0) {
 			goto error;
 		}
-
-		gve_try_recycle_buf(priv, rx, buf_state);
 		return 0;
 	}
 
@@ -590,6 +674,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 		goto error;
 	rx->ctx.skb_tail = rx->ctx.skb_head;
 
+	if (gve_rx_should_trigger_copy_ondemand(rx)) {
+		if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0)
+			goto error;
+		return 0;
+	}
+
 	skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
 			buf_state->page_info.page_offset, buf_len,
 			priv->data_buffer_size_dqo);