
Commit 5cdaf9d

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2021-02-12

This series contains updates to the i40e, ice, and ixgbe drivers.

Maciej does cleanups on the following drivers. For i40e, he removes a redundant check for an XDP prog, cleans up information that is no longer relevant, and removes an unused function argument. For ice, he removes local variable use in favor of returning values directly, moves the skb pointer from the buffer to the ring, removes an unneeded check for xdp_prog in the zero-copy path, and removes a redundant MTU check when changing the MTU. For i40e, ice, and ixgbe, he stores the rx_offset in the Rx ring, since the value is constant and there is no need for continual calls.

Bjorn folds a decrement into a while statement.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents: 7aceeb7 + c0d4e9d

10 files changed: 86 additions and 136 deletions

drivers/net/ethernet/intel/i40e/i40e_main.c

Lines changed: 0 additions & 3 deletions
@@ -12947,9 +12947,6 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
                 return -EINVAL;
         }
 
-        if (!i40e_enabled_xdp_vsi(vsi) && !prog)
-                return 0;
-
         /* When turning XDP on->off/off->on we reset and rebuild the rings. */
         need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
 
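Note: the early return removed here only covered the case where XDP is already disabled on the VSI and no new program is being installed; the need_reset expression kept just below already evaluates to false in that case, which is why the check is redundant. A small stand-alone sketch of that expression, with stand-in values instead of the real vsi/prog types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for i40e_enabled_xdp_vsi(vsi) plus the incoming bpf_prog pointer. */
static bool need_reset(bool xdp_enabled, const void *prog)
{
        /* Same shape as the driver line:
         *   need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
         * !!prog normalizes the pointer to 0 or 1 before the comparison.
         */
        return xdp_enabled != !!prog;
}

int main(void)
{
        int dummy;

        printf("off -> off: %d\n", need_reset(false, NULL));   /* 0: no reset */
        printf("off -> on : %d\n", need_reset(false, &dummy)); /* 1: reset    */
        printf("on  -> off: %d\n", need_reset(true, NULL));    /* 1: reset    */
        printf("on  -> on : %d\n", need_reset(true, &dummy));  /* 0: no reset */
        return 0;
}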

drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 30 additions & 61 deletions
@@ -1569,6 +1569,17 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
         }
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+        return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1597,6 +1608,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
         rx_ring->next_to_alloc = 0;
         rx_ring->next_to_clean = 0;
         rx_ring->next_to_use = 0;
+        rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
         /* XDP RX-queue info only needed for RX rings exposed to XDP */
         if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -1632,17 +1644,6 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
         writel(val, rx_ring->tail);
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-        return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
                                            unsigned int size)
 {
@@ -1651,8 +1652,8 @@ static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
         truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 #else
-        truesize = i40e_rx_offset(rx_ring) ?
-                SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+        truesize = rx_ring->rx_offset ?
+                SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
                 SKB_DATA_ALIGN(size);
 #endif
@@ -1703,7 +1704,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 
         bi->dma = dma;
         bi->page = page;
-        bi->page_offset = i40e_rx_offset(rx_ring);
+        bi->page_offset = rx_ring->rx_offset;
         page_ref_add(page, USHRT_MAX - 1);
         bi->pagecnt_bias = USHRT_MAX;
 
@@ -1963,9 +1964,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
  * @skb: pointer to current skb being fixed
  * @rx_desc: pointer to the EOP Rx descriptor
  *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
  * In addition if skb is not at least 60 bytes we need to pad it so that
  * it is large enough to qualify as a valid Ethernet frame.
  *
@@ -1998,33 +1996,15 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 }
 
 /**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
+ * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buffer: buffer containing the page
  * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
  *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
+ * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
                                    int rx_buffer_pgcnt)
 {
@@ -2078,7 +2058,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
         unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-        unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+        unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
 #endif
 
         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2292,25 +2272,13 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
  * i40e_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
  * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
  *
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
- **/
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc,
-                            struct sk_buff *skb)
+                            union i40e_rx_desc *rx_desc)
 {
-        u32 ntc = rx_ring->next_to_clean + 1;
-
-        /* fetch, update, and store next to clean */
-        ntc = (ntc < rx_ring->count) ? ntc : 0;
-        rx_ring->next_to_clean = ntc;
-
-        prefetch(I40E_RX_DESC(rx_ring, ntc));
-
         /* if we are the last buffer then there is nothing else to do */
 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
         if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
@@ -2486,8 +2454,9 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
         unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
-        struct sk_buff *skb = rx_ring->skb;
         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+        unsigned int offset = rx_ring->rx_offset;
+        struct sk_buff *skb = rx_ring->skb;
         unsigned int xdp_xmit = 0;
         bool failure = false;
         struct xdp_buff xdp;
@@ -2547,7 +2516,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                 /* retrieve a buffer from the ring */
                 if (!skb) {
-                        unsigned int offset = i40e_rx_offset(rx_ring);
                         unsigned char *hard_start;
 
                         hard_start = page_address(rx_buffer->page) +
@@ -2589,7 +2557,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
                 cleaned_count++;
 
-                if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+                i40e_inc_ntc(rx_ring);
+                if (i40e_is_non_eop(rx_ring, rx_desc))
                         continue;
 
                 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
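Note: the recurring change in this file is one pattern: i40e_rx_offset() depends only on whether the ring uses build_skb, so its result is now computed once in i40e_setup_rx_descriptors() and read back from rx_ring->rx_offset in the hot paths instead of being recomputed per buffer. A simplified, self-contained sketch of that caching pattern; the struct, constant, and helper names below are stand-ins, not the driver's real definitions:

#include <stdbool.h>
#include <stdio.h>

#define SKB_PAD 192 /* stand-in for I40E_SKB_PAD */

struct rx_ring {
        bool uses_build_skb;
        unsigned short rx_offset; /* cached once at setup, like the new u16 field */
};

/* Analogue of i40e_rx_offset(): constant for the lifetime of the ring. */
static unsigned int rx_offset(const struct rx_ring *ring)
{
        return ring->uses_build_skb ? SKB_PAD : 0;
}

static void setup_ring(struct rx_ring *ring, bool build_skb)
{
        ring->uses_build_skb = build_skb;
        /* Compute once here instead of on every buffer allocation/refill. */
        ring->rx_offset = rx_offset(ring);
}

int main(void)
{
        struct rx_ring ring;

        setup_ring(&ring, true);
        /* Hot path: read the cached value, no per-buffer function call. */
        printf("page_offset starts at %u\n", (unsigned int)ring.rx_offset);
        return 0;
}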

drivers/net/ethernet/intel/i40e/i40e_txrx.h

Lines changed: 1 addition & 0 deletions
@@ -387,6 +387,7 @@ struct i40e_ring {
          */
 
         struct i40e_channel *ch;
+        u16 rx_offset;
         struct xdp_rxq_info xdp_rxq;
         struct xsk_buff_pool *xsk_pool;
         struct xdp_desc *xsk_descs;      /* For storing descriptors in the AF_XDP ZC path */

drivers/net/ethernet/intel/i40e/i40e_xsk.c

Lines changed: 1 addition & 3 deletions
@@ -215,9 +215,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
                         bi = i40e_rx_bi(rx_ring, 0);
                         ntu = 0;
                 }
-
-                count--;
-        } while (count);
+        } while (--count);
 
 no_buffers:
         if (rx_ring->next_to_use != ntu) {
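Note: the two loop forms are equivalent here. For any starting count of at least one (which the allocation path provides), decrementing in the condition performs the same number of iterations and exits with count at zero, so the fold only removes lines. A small stand-alone illustration:

#include <stdio.h>

/* Both loops perform the same number of iterations for any count >= 1. */
static unsigned int iterations_old(unsigned short count)
{
        unsigned int n = 0;

        do {
                n++;
                count--;
        } while (count);
        return n;
}

static unsigned int iterations_new(unsigned short count)
{
        unsigned int n = 0;

        do {
                n++;
        } while (--count);
        return n;
}

int main(void)
{
        for (unsigned short c = 1; c <= 4; c++)
                printf("count=%u: old=%u new=%u\n", (unsigned int)c,
                       iterations_old(c), iterations_new(c));
        return 0;
}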

drivers/net/ethernet/intel/ice/ice_main.c

Lines changed: 0 additions & 9 deletions
@@ -6154,15 +6154,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
                 }
         }
 
-        if (new_mtu < (int)netdev->min_mtu) {
-                netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
-                           netdev->min_mtu);
-                return -EINVAL;
-        } else if (new_mtu > (int)netdev->max_mtu) {
-                netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
-                           netdev->min_mtu);
-                return -EINVAL;
-        }
         /* if a reset is in progress, wait for some time for it to complete */
         do {
                 if (ice_is_reset_in_progress(pf->state)) {
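Note: the removed range check duplicated validation the networking core already performs; new_mtu is checked against netdev->min_mtu and netdev->max_mtu before the driver's ndo_change_mtu callback runs (the dropped driver copy also printed min_mtu in the max_mtu error message). A rough, illustrative sketch of that "validate in the core, then call the driver hook" split; none of the names or limits below are the kernel's real functions, types, or values:

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's real net_device or ops table. */
struct net_device_sketch {
        int mtu;
        int min_mtu;
        int max_mtu;
        int (*ndo_change_mtu)(struct net_device_sketch *dev, int new_mtu);
};

/* Core-side validation: the driver hook only ever sees in-range values. */
static int dev_set_mtu_sketch(struct net_device_sketch *dev, int new_mtu)
{
        if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
                return -EINVAL;
        return dev->ndo_change_mtu(dev, new_mtu);
}

/* Driver side, like ice_change_mtu() after this patch: no range check needed. */
static int driver_change_mtu(struct net_device_sketch *dev, int new_mtu)
{
        dev->mtu = new_mtu;
        printf("MTU set to %d\n", new_mtu);
        return 0;
}

int main(void)
{
        struct net_device_sketch dev = {
                .mtu = 1500, .min_mtu = 68, .max_mtu = 9702,
                .ndo_change_mtu = driver_change_mtu,
        };

        printf("set 9000 -> %d\n", dev_set_mtu_sketch(&dev, 9000));
        printf("set 20   -> %d\n", dev_set_mtu_sketch(&dev, 20)); /* -EINVAL */
        return 0;
}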
