@@ -189,7 +189,9 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 				      struct ionic_desc_info *desc_info,
-				      struct ionic_rxq_comp *comp,
+				      unsigned int headroom,
+				      unsigned int len,
+				      unsigned int num_sg_elems,
 				      bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
@@ -199,12 +201,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 	struct sk_buff *skb;
 	unsigned int i;
 	u16 frag_len;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	prefetchw(buf_info->page);
 
@@ -216,30 +216,37 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 		return NULL;
 	}
 
-	i = comp->num_sg_elems + 1;
+	i = num_sg_elems + 1;
 	do {
 		if (unlikely(!buf_info->page)) {
 			dev_kfree_skb(skb);
 			return NULL;
 		}
 
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		if (headroom)
+			frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+		else
+			frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
 		len -= frag_len;
 
 		if (!synced)
 			dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-						      0, frag_len, DMA_FROM_DEVICE);
+						      headroom, frag_len, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				buf_info->page, buf_info->page_offset, frag_len,
-				IONIC_PAGE_SIZE);
+				buf_info->page, buf_info->page_offset + headroom,
+				frag_len, IONIC_PAGE_SIZE);
 
 		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 			dma_unmap_page(dev, buf_info->dma_addr,
 				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 			buf_info->page = NULL;
 		}
 
+		/* only needed on the first buffer */
+		if (headroom)
+			headroom = 0;
+
 		buf_info++;
 
 		i--;
@@ -250,20 +257,19 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 
 static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 					  struct ionic_desc_info *desc_info,
-					  struct ionic_rxq_comp *comp,
+					  unsigned int headroom,
+					  unsigned int len,
 					  bool synced)
 {
 	struct net_device *netdev = q->lif->netdev;
 	struct ionic_buf_info *buf_info;
 	struct ionic_rx_stats *stats;
 	struct device *dev = q->dev;
 	struct sk_buff *skb;
-	u16 len;
 
 	stats = q_to_rx_stats(q);
 
 	buf_info = &desc_info->bufs[0];
-	len = le16_to_cpu(comp->len);
 
 	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
 	if (unlikely(!skb)) {
@@ -280,10 +286,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 
 	if (!synced)
 		dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
-					      0, len, DMA_FROM_DEVICE);
-	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), len);
+					      headroom, len, DMA_FROM_DEVICE);
+	skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
 	dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
-					 0, len, DMA_FROM_DEVICE);
+					 headroom, len, DMA_FROM_DEVICE);
 
 	skb_put(skb, len);
 	skb->protocol = eth_type_trans(skb, q->lif->netdev);
@@ -303,10 +309,10 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 
 	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
-			 0, len, false);
+			 XDP_PACKET_HEADROOM, len, false);
 
 	dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
-				      0, len,
+				      XDP_PACKET_HEADROOM, len,
 				      DMA_FROM_DEVICE);
 
 	prefetchw(&xdp_buf.data_hard_start);
@@ -345,6 +351,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	struct ionic_rx_stats *stats;
 	struct ionic_rxq_comp *comp;
 	struct bpf_prog *xdp_prog;
+	unsigned int headroom;
 	struct sk_buff *skb;
 	u16 len;
 
@@ -366,10 +373,12 @@ static void ionic_rx_clean(struct ionic_queue *q,
 	    ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
 		return;
 
+	headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
 	if (len <= q->lif->rx_copybreak)
-		skb = ionic_rx_copybreak(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_copybreak(q, desc_info, headroom, len, !!xdp_prog);
 	else
-		skb = ionic_rx_frags(q, desc_info, comp, !!xdp_prog);
+		skb = ionic_rx_frags(q, desc_info, headroom, len,
+				     comp->num_sg_elems, !!xdp_prog);
 
 	if (unlikely(!skb)) {
 		stats->dropped++;
@@ -493,8 +502,9 @@ void ionic_rx_fill(struct ionic_queue *q)
 	unsigned int frag_len;
 	unsigned int nfrags;
 	unsigned int n_fill;
-	unsigned int i, j;
 	unsigned int len;
+	unsigned int i;
+	unsigned int j;
 
 	n_fill = ionic_q_space_avail(q);
 
@@ -503,9 +513,12 @@ void ionic_rx_fill(struct ionic_queue *q)
 	if (n_fill < fill_threshold)
 		return;
 
-	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+	len = netdev->mtu + VLAN_ETH_HLEN;
 
 	for (i = n_fill; i; i--) {
+		unsigned int headroom;
+		unsigned int buf_len;
+
 		nfrags = 0;
 		remain_len = len;
 		desc_info = &q->info[q->head_idx];
@@ -520,9 +533,18 @@ void ionic_rx_fill(struct ionic_queue *q)
 			}
 		}
 
-		/* fill main descriptor - buf[0] */
-		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
-		frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+		/* fill main descriptor - buf[0]
+		 * XDP uses space in the first buffer, so account for
+		 * head room, tail room, and ip header in the first frag size.
+		 */
+		headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+		if (q->xdp_rxq_info)
+			buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+		else
+			buf_len = ionic_rx_buf_size(buf_info);
+		frag_len = min_t(u16, len, buf_len);
+
+		desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
 		desc->len = cpu_to_le16(frag_len);
 		remain_len -= frag_len;
 		buf_info++;
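
For reference, the first-buffer sizing above boils down to reserving XDP_PACKET_HEADROOM at the front of the page and capping the linear frag so an XDP program still has room to grow the packet. Below is a minimal, self-contained sketch of that arithmetic. XDP_PACKET_HEADROOM matches the mainline kernel value (256); IONIC_PAGE_SIZE, the skb_shared_info overhead, and the IONIC_XDP_MAX_LINEAR_MTU definition are illustrative assumptions, since the driver's actual definitions are not part of this diff.

```c
#include <stdbool.h>
#include <stdio.h>

#define XDP_PACKET_HEADROOM	256	/* kernel: include/uapi/linux/bpf.h */
#define VLAN_ETH_HLEN		18	/* Ethernet header + one VLAN tag */
#define IONIC_PAGE_SIZE		4096	/* assumed: one 4K page per rx buffer */
#define SKB_SHINFO_SPACE	320	/* assumed skb_shared_info tailroom */

/* Assumed shape of IONIC_XDP_MAX_LINEAR_MTU: the payload that still
 * fits in one page once headroom, tailroom, and the header are reserved.
 */
#define IONIC_XDP_MAX_LINEAR_MTU \
	(IONIC_PAGE_SIZE - XDP_PACKET_HEADROOM - SKB_SHINFO_SPACE - VLAN_ETH_HLEN)

/* First-buffer length budget as computed in ionic_rx_fill() above. */
static unsigned int first_buf_len(bool xdp_enabled, unsigned int page_buf_len)
{
	return xdp_enabled ? IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN
			   : page_buf_len;
}

int main(void)
{
	/* q->xdp_rxq_info set, so headroom is reserved */
	unsigned int headroom = XDP_PACKET_HEADROOM;
	unsigned int buf_len = first_buf_len(true, IONIC_PAGE_SIZE);

	/* The DMA address handed to the device starts after the headroom,
	 * mirroring cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom).
	 */
	printf("headroom=%u first-frag budget=%u of %u-byte page\n",
	       headroom, buf_len, IONIC_PAGE_SIZE);
	return 0;
}
```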