Commit de1db4a

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2021-02-08

This series contains updates to the i40e driver only.

Cristian makes improvements to the driver XDP path. He avoids writing the next-to-clean pointer on every update, removes redundant updates of cleaned_count and buffer info, creates a helper function to consolidate XDP actions, and simplifies some of the behavior.

Eryk adds messages to inform the user when the MTU is larger than supported.
====================

Signed-off-by: David S. Miller <[email protected]>
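The ring-index handling Cristian's patches settle on is visible in the i40e_xsk.c diff below: because the descriptor ring size is a power of two, the next-to-clean index can wrap with a single mask, stay in a local variable for the whole poll, and be written back once at the end. A minimal standalone sketch of that pattern, with simplified types and illustrative names (not driver code):

    #include <stdio.h>

    /* Simplified stand-in for the driver's Rx ring; names are illustrative. */
    struct ring {
            unsigned short count;         /* must be a power of two */
            unsigned short next_to_clean; /* shared ring state */
    };

    static void poll(struct ring *r, int budget)
    {
            unsigned short count_mask = r->count - 1;
            unsigned short ntc = r->next_to_clean; /* local copy for the hot loop */

            while (budget--) {
                    /* ... process descriptor at index ntc ... */
                    ntc = (ntc + 1) & count_mask; /* branch-free wrap-around */
            }

            r->next_to_clean = ntc; /* one write-back instead of one per packet */
    }

    int main(void)
    {
            struct ring r = { .count = 8, .next_to_clean = 6 };

            poll(&r, 4);
            printf("%u\n", r.next_to_clean); /* (6 + 4) & 7 == 2 */
            return 0;
    }

In the driver the same deferred write-back also feeds the refill logic, since cleaned_count is derived from the final index (see the i40e_clean_rx_irq_zc() hunk below).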
2 parents 74784ee + 613142b

2 files changed, +86 -74 lines

drivers/net/ethernet/intel/i40e/i40e_main.c

7 additions, 4 deletions
@@ -12448,9 +12448,10 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
  * i40e_xdp_setup - add/remove an XDP program
  * @vsi: VSI to changed
  * @prog: XDP program
+ * @extack: netlink extended ack
  **/
-static int i40e_xdp_setup(struct i40e_vsi *vsi,
-			  struct bpf_prog *prog)
+static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+			  struct netlink_ext_ack *extack)
 {
 	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	struct i40e_pf *pf = vsi->back;
@@ -12459,8 +12460,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
 	int i;
 
 	/* Don't allow frames that span over multiple buffers */
-	if (frame_size > vsi->rx_buf_len)
+	if (frame_size > vsi->rx_buf_len) {
+		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
 		return -EINVAL;
+	}
 
 	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
 		return 0;
@@ -12769,7 +12772,7 @@ static int i40e_xdp(struct net_device *dev,
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
-		return i40e_xdp_setup(vsi, xdp->prog);
+		return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
 	case XDP_SETUP_XSK_POOL:
 		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
 					   xdp->xsk.queue_id);
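For a feel of the numbers behind the check above: frame_size is the MTU plus the Ethernet header, FCS, and one VLAN tag. A standalone sketch with assumed values; the 3072-byte Rx buffer is an assumption standing in for vsi->rx_buf_len, not something this diff sets:

    #include <stdio.h>

    /* Kernel constants restated locally so the example is self-contained. */
    #define ETH_HLEN    14  /* Ethernet header */
    #define ETH_FCS_LEN  4  /* frame check sequence */
    #define VLAN_HLEN    4  /* one VLAN tag */

    int main(void)
    {
            int rx_buf_len = 3072;  /* assumption: Rx buffer size of an XDP ring */
            int mtu = 9000;         /* a jumbo MTU a user might request */
            int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

            if (frame_size > rx_buf_len)
                    printf("MTU too large to enable XDP (%d > %d)\n",
                           frame_size, rx_buf_len);
            return 0;
    }

Before this series the condition failed with a bare -EINVAL; with the extack plumbed through, NL_SET_ERR_MSG_MOD carries the reason back to userspace via the netlink extended ack, prefixed with the module name.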

drivers/net/ethernet/intel/i40e/i40e_xsk.c

79 additions, 70 deletions
@@ -250,27 +250,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
 			       xdp->data_end - xdp->data_hard_start,
 			       GFP_ATOMIC | __GFP_NOWARN);
 	if (unlikely(!skb))
-		return NULL;
+		goto out;
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
+out:
 	xsk_buff_free(xdp);
 	return skb;
 }
 
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+				      struct xdp_buff *xdp_buff,
+				      union i40e_rx_desc *rx_desc,
+				      unsigned int *rx_packets,
+				      unsigned int *rx_bytes,
+				      unsigned int size,
+				      unsigned int xdp_res)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
+	struct sk_buff *skb;
+
+	*rx_packets = 1;
+	*rx_bytes = size;
+
+	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+		return;
+
+	if (xdp_res == I40E_XDP_CONSUMED) {
+		xsk_buff_free(xdp_buff);
+		return;
+	}
+
+	if (xdp_res == I40E_XDP_PASS) {
+		/* NB! We are not checking for errors using
+		 * i40e_test_staterr with
+		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
+		 * SBP is *not* set in PRT_SBPVSI (default not set).
+		 */
+		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_buff_failed++;
+			*rx_packets = 0;
+			*rx_bytes = 0;
+			return;
+		}
 
-	ntc = (ntc < rx_ring->count) ? ntc : 0;
-	rx_ring->next_to_clean = ntc;
+		if (eth_skb_pad(skb)) {
+			*rx_packets = 0;
+			*rx_bytes = 0;
+			return;
+		}
+
+		*rx_bytes = skb->len;
+		i40e_process_skb_fields(rx_ring, rx_desc, skb);
+		napi_gro_receive(&rx_ring->q_vector->napi, skb);
+		return;
+	}
+
+	/* Should never get here, as all valid cases have been handled already.
+	 */
+	WARN_ON_ONCE(1);
 }
 
 /**
@@ -284,17 +325,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	u16 next_to_clean = rx_ring->next_to_clean;
+	u16 count_mask = rx_ring->count - 1;
 	unsigned int xdp_res, xdp_xmit = 0;
 	bool failure = false;
-	struct sk_buff *skb;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff **bi;
+		unsigned int rx_packets;
+		unsigned int rx_bytes;
+		struct xdp_buff *bi;
 		unsigned int size;
 		u64 qword;
 
-		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 
 		/* This memory barrier is needed to keep us from reading
@@ -307,11 +351,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 			i40e_clean_programming_status(rx_ring,
 						      rx_desc->raw.qword[0],
 						      qword);
-			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
-			xsk_buff_free(*bi);
-			*bi = NULL;
-			cleaned_count++;
-			i40e_inc_ntc(rx_ring);
+			bi = *i40e_rx_bi(rx_ring, next_to_clean);
+			xsk_buff_free(bi);
+			next_to_clean = (next_to_clean + 1) & count_mask;
 			continue;
 		}
 
@@ -320,61 +362,30 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
 		if (!size)
 			break;
 
-		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
-		(*bi)->data_end = (*bi)->data + size;
-		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
-
-		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
-		if (xdp_res) {
-			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
-				xdp_xmit |= xdp_res;
-			else
-				xsk_buff_free(*bi);
-
-			*bi = NULL;
-			total_rx_bytes += size;
-			total_rx_packets++;
-
-			cleaned_count++;
-			i40e_inc_ntc(rx_ring);
-			continue;
-		}
-
-		/* XDP_PASS path */
-
-		/* NB! We are not checking for errors using
-		 * i40e_test_staterr with
-		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
-		 * SBP is *not* set in PRT_SBPVSI (default not set).
-		 */
-		skb = i40e_construct_skb_zc(rx_ring, *bi);
-		if (!skb) {
-			rx_ring->rx_stats.alloc_buff_failed++;
-			break;
-		}
-
-		*bi = NULL;
-		cleaned_count++;
-		i40e_inc_ntc(rx_ring);
-
-		if (eth_skb_pad(skb))
-			continue;
-
-		total_rx_bytes += skb->len;
-		total_rx_packets++;
-
-		i40e_process_skb_fields(rx_ring, rx_desc, skb);
-		napi_gro_receive(&rx_ring->q_vector->napi, skb);
+		bi = *i40e_rx_bi(rx_ring, next_to_clean);
+		bi->data_end = bi->data + size;
+		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+					  &rx_bytes, size, xdp_res);
+		total_rx_packets += rx_packets;
+		total_rx_bytes += rx_bytes;
+		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+		next_to_clean = (next_to_clean + 1) & count_mask;
 	}
 
+	rx_ring->next_to_clean = next_to_clean;
+	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
 	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
 		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
 
 	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
 	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
-		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+		if (failure || next_to_clean == rx_ring->next_to_use)
 			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 		else
 			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
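Since the loop no longer bumps a cleaned_count alongside every descriptor, the refill budget is recomputed once after the loop from the final indices. A quick standalone check of that modular arithmetic (ring size and index positions are made-up example values):

    #include <assert.h>

    int main(void)
    {
            unsigned short count = 512, mask = count - 1;  /* assumed ring size */
            unsigned short ntu = 500, ntc = 40;            /* example positions */
            unsigned short cleaned = (ntc - ntu - 1) & mask;

            /* slots 500..511 (12) plus 0..39 (40), minus the one slot kept
             * empty so a full ring stays distinguishable from an empty one
             */
            assert(cleaned == 51);
            return 0;
    }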
@@ -604,16 +615,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 
 void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 {
-	u16 i;
-
-	for (i = 0; i < rx_ring->count; i++) {
-		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+	u16 count_mask = rx_ring->count - 1;
+	u16 ntc = rx_ring->next_to_clean;
+	u16 ntu = rx_ring->next_to_use;
 
-		if (!rx_bi)
-			continue;
+	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
 
 		xsk_buff_free(rx_bi);
-		rx_bi = NULL;
 	}
 }
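A note on this final hunk: with buffer slots no longer NULLed as they are consumed, i40e_xsk_clean_rx_ring() cannot scan the whole ring for non-NULL entries. Instead it walks exactly the next_to_clean..next_to_use window, i.e. the buffers that were posted to hardware but never cleaned, which is the only set that still needs freeing at teardown.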
