@@ -1569,6 +1569,17 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 	}
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
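The helper moves above i40e_setup_rx_descriptors so the next hunk can call it there. As a standalone illustration of the rule it encodes (headroom in front of the packet data only when the build_skb path is in use), here is a minimal sketch; the sketch_* names and the headroom value are illustrative stand-ins, not the driver's definitions, which derive the pad from I40E_SKB_PAD.

    #include <stdbool.h>
    #include <stdio.h>

    #define SKETCH_SKB_PAD 64u      /* stand-in for the I40E_SKB_PAD headroom */

    struct sketch_ring {
            bool build_skb;         /* stand-in for ring_uses_build_skb() */
    };

    /* Headroom is only reserved when the skb is built around the page;
     * the legacy path copies headers out and starts data at offset 0. */
    static unsigned int sketch_rx_offset(const struct sketch_ring *r)
    {
            return r->build_skb ? SKETCH_SKB_PAD : 0;
    }

    int main(void)
    {
            struct sketch_ring r = { .build_skb = true };

            printf("offset=%u\n", sketch_rx_offset(&r)); /* prints 64 */
            return 0;
    }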
@@ -1597,6 +1608,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
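Caching the offset at ring setup means the per-packet hot path reads a struct field instead of re-deriving the value; the remaining hunks swap each i40e_rx_offset() call site for the cached field. A sketch of the compute-once pattern, using hypothetical sketch_* names:

    #include <stdio.h>

    struct sketch_ring {
            unsigned int rx_offset;  /* filled in once at setup time */
    };

    static unsigned int sketch_compute_offset(void)
    {
            return 64u;              /* stand-in for i40e_rx_offset() */
    }

    static void sketch_setup(struct sketch_ring *r)
    {
            /* one computation per ring, mirroring
             * rx_ring->rx_offset = i40e_rx_offset(rx_ring); */
            r->rx_offset = sketch_compute_offset();
    }

    int main(void)
    {
            struct sketch_ring r;

            sketch_setup(&r);
            printf("%u\n", r.rx_offset); /* hot path: a plain load */
            return 0;
    }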
@@ -1632,17 +1644,6 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
 	writel(val, rx_ring->tail);
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
 					   unsigned int size)
 {
@@ -1651,8 +1652,8 @@ static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 #else
-	truesize = i40e_rx_offset(rx_ring) ?
-		   SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+	truesize = rx_ring->rx_offset ?
+		   SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 		   SKB_DATA_ALIGN(size);
 #endif
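On PAGE_SIZE >= 8192 builds, truesize covers the cache-line-aligned data plus headroom and, when headroom is present, the skb_shared_info that build_skb places at the end of the buffer. A worked sketch of that arithmetic, assuming a 64-byte cache line; DATA_ALIGN mimics SKB_DATA_ALIGN and the shared-info size is an illustrative stand-in:

    #include <stdio.h>

    #define CACHELINE 64u
    #define DATA_ALIGN(x) (((x) + CACHELINE - 1) & ~(CACHELINE - 1))
    #define SHINFO_SZ 320u  /* stand-in for sizeof(struct skb_shared_info) */

    static unsigned int sketch_truesize(unsigned int size, unsigned int off)
    {
            return off ? DATA_ALIGN(size + off) + DATA_ALIGN(SHINFO_SZ)
                       : DATA_ALIGN(size);
    }

    int main(void)
    {
            /* 1514-byte frame with 64 bytes of headroom:
             * DATA_ALIGN(1578) + DATA_ALIGN(320) = 1600 + 320 = 1920 */
            printf("%u\n", sketch_truesize(1514, 64));
            /* no headroom: DATA_ALIGN(1514) = 1536 */
            printf("%u\n", sketch_truesize(1514, 0));
            return 0;
    }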
@@ -1703,7 +1704,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 
 	bi->dma = dma;
 	bi->page = page;
-	bi->page_offset = i40e_rx_offset(rx_ring);
+	bi->page_offset = rx_ring->rx_offset;
 	page_ref_add(page, USHRT_MAX - 1);
 	bi->pagecnt_bias = USHRT_MAX;
 
@@ -1963,9 +1964,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
  * @skb: pointer to current skb being fixed
  * @rx_desc: pointer to the EOP Rx descriptor
  *
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
  * In addition if skb is not at least 60 bytes we need to pad it so that
  * it is large enough to qualify as a valid Ethernet frame.
  *
@@ -1998,33 +1996,15 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 }
 
 /**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
+ * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buffer: buffer containing the page
  * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
  *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
+ * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 				   int rx_buffer_pgcnt)
 {
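The dropped comment described the reuse heuristics at length; the heart of the small-page case survives in the code as a refcount test. A standalone sketch of that test, with illustrative names mirroring the driver's rx_buffer_pgcnt and pagecnt_bias fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct sketch_rx_buffer {
            int pagecnt_bias;       /* references the driver still holds */
    };

    /* If more than one reference beyond the driver's own bias exists,
     * the stack still owns the other half of the page: no reuse. */
    static bool sketch_can_reuse(const struct sketch_rx_buffer *b, int pgcnt)
    {
            return (pgcnt - b->pagecnt_bias) <= 1;
    }

    int main(void)
    {
            struct sketch_rx_buffer b = { .pagecnt_bias = 65534 };

            printf("%d\n", sketch_can_reuse(&b, 65535)); /* 1: reusable */
            printf("%d\n", sketch_can_reuse(&b, 65536)); /* 0: stack busy */
            return 0;
    }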
@@ -2078,7 +2058,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
 #endif
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2292,25 +2272,13 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
  * i40e_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
  * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
  *
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
- **/
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
-			    union i40e_rx_desc *rx_desc,
-			    struct sk_buff *skb)
+			    union i40e_rx_desc *rx_desc)
 {
-	u32 ntc = rx_ring->next_to_clean + 1;
-
-	/* fetch, update, and store next to clean */
-	ntc = (ntc < rx_ring->count) ? ntc : 0;
-	rx_ring->next_to_clean = ntc;
-
-	prefetch(I40E_RX_DESC(rx_ring, ntc));
-
 	/* if we are the last buffer then there is nothing else to do */
 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
 	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
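With the skb parameter and the next_to_clean bookkeeping removed, i40e_is_non_eop becomes a pure EOP test; the index advance now happens in i40e_inc_ntc (visible as hunk context below) at the call site, see the last hunk. The wraparound the removed lines performed, as a standalone sketch:

    struct sketch_ring {
            unsigned int next_to_clean;
            unsigned int count;     /* number of descriptors in the ring */
    };

    /* Advance next_to_clean, wrapping to 0 at the end of the ring;
     * this mirrors the logic that moved into i40e_inc_ntc(). */
    static void sketch_inc_ntc(struct sketch_ring *r)
    {
            unsigned int ntc = r->next_to_clean + 1;

            r->next_to_clean = (ntc < r->count) ? ntc : 0;
    }

    int main(void)
    {
            struct sketch_ring r = { .next_to_clean = 511, .count = 512 };

            sketch_inc_ntc(&r);     /* wraps: next_to_clean is now 0 */
            return (int)r.next_to_clean;
    }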
@@ -2486,8 +2454,9 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
-	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+	unsigned int offset = rx_ring->rx_offset;
+	struct sk_buff *skb = rx_ring->skb;
 	unsigned int xdp_xmit = 0;
 	bool failure = false;
 	struct xdp_buff xdp;
@@ -2547,7 +2516,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
-			unsigned int offset = i40e_rx_offset(rx_ring);
 			unsigned char *hard_start;
 
 			hard_start = page_address(rx_buffer->page) +
@@ -2589,7 +2557,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
 		cleaned_count++;
 
-		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+		i40e_inc_ntc(rx_ring);
+		if (i40e_is_non_eop(rx_ring, rx_desc))
 			continue;
 
 		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
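After this change the cleaning loop advances the ring index unconditionally and then asks only whether the frame is complete. A compact sketch of that control flow, with hypothetical helpers standing in for the driver's descriptor handling:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int ntc;        /* stand-in ring index */

    static void sketch_inc_ntc(void)
    {
            ntc = (ntc + 1) % 512;
    }

    int main(void)
    {
            /* three buffers of one frame; only the last carries EOP */
            bool eop[] = { false, false, true };

            for (int i = 0; i < 3; i++) {
                    sketch_inc_ntc();  /* always advance, as in the patch */
                    if (!eop[i])
                            continue;  /* frame spans into the next buffer */
                    printf("frame complete at ntc=%u\n", ntc);
            }
            return 0;
    }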