Commit 18cb652

Alexander Duyck authored and Jeff Kirsher committed
ixgbe: Clean-up page reuse code
This patch cleans up the page reuse code, getting it into a state where all the workarounds needed are in place, as well as cleaning up a few minor oversights such as using __free_pages instead of put_page to drop a locally allocated page.

It also cleans up how we clear the descriptor status bits. Previously they were zeroed as a part of clearing the hdr_addr. However, the hdr_addr is a 64 bit field and 64 bit writes can be a bit more expensive on 32 bit systems. Since we are no longer using the header split feature, the upper 32 bits of the address no longer need to be cleared. As a result we can just clear the status bits and leave the length and VLAN fields as-is, which should provide more information in debugging.

Cc: Don Skidmore <[email protected]>
Signed-off-by: Alexander Duyck <[email protected]>
Tested-by: Phil Schmitt <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent d8febb7 commit 18cb652
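
The hdr_addr vs. status_error point in the commit message is easier to see from the descriptor layout. The sketch below is a simplified model, not the driver's actual union ixgbe_adv_rx_desc from ixgbe_type.h (which carries more fields), but it keeps the sizes that matter: the old refill path issued a 64-bit store to read.hdr_addr, while the new code issues a single 32-bit store to wb.upper.status_error and leaves the length and VLAN fields untouched.

/*
 * Simplified sketch of the advanced Rx descriptor layout; NOT the driver's
 * definition, only a model of the field sizes relevant to this change.
 */
#include <stdint.h>

union adv_rx_desc {
	struct {
		uint64_t pkt_addr;	/* packet buffer address */
		uint64_t hdr_addr;	/* header buffer address; unused now
					 * that header split is gone */
	} read;				/* format the driver writes on refill */
	struct {
		struct {
			uint32_t lo_dword;
			uint32_t hi_dword;
		} lower;
		struct {
			uint32_t status_error;	/* DD/EOP/error bits */
			uint16_t length;	/* packet length */
			uint16_t vlan;		/* stripped VLAN tag */
		} upper;
	} wb;				/* format the hardware writes back */
};

/* Old refill: zeroing hdr_addr is a 64-bit store (two stores on 32-bit). */
static void clear_old(union adv_rx_desc *desc)
{
	desc->read.hdr_addr = 0;
}

/* New refill: one 32-bit store that clears only the status bits and leaves
 * length/vlan intact for debugging.
 */
static void clear_new(union adv_rx_desc *desc)
{
	desc->wb.upper.status_error = 0;
}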

File tree

1 file changed (+36, -42 lines)

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

Lines changed: 36 additions & 42 deletions

@@ -1436,20 +1436,17 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 				    struct ixgbe_rx_buffer *bi)
 {
 	struct page *page = bi->page;
-	dma_addr_t dma = bi->dma;
+	dma_addr_t dma;
 
 	/* since we are recycling buffers we should seldom need to alloc */
-	if (likely(dma))
+	if (likely(page))
 		return true;
 
 	/* alloc new page for storage */
-	if (likely(!page)) {
-		page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
-		if (unlikely(!page)) {
-			rx_ring->rx_stats.alloc_rx_page_failed++;
-			return false;
-		}
-		bi->page = page;
+	page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
 	}
 
 	/* map page for use */
@@ -1462,13 +1459,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
-		bi->page = NULL;
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	bi->dma = dma;
+	bi->page = page;
 	bi->page_offset = 0;
 
 	return true;
@@ -1512,8 +1509,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -1798,9 +1795,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	new_buff->page = old_buff->page;
-	new_buff->dma = old_buff->dma;
-	new_buff->page_offset = old_buff->page_offset;
+	*new_buff = *old_buff;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1809,6 +1804,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 					 DMA_FROM_DEVICE);
 }
 
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1844,20 +1844,20 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-		/* we can reuse buffer as-is, just make sure it is local */
-		if (likely(page_to_nid(page) == numa_node_id()))
+		/* page is not reserved, we can reuse buffer as-is */
+		if (likely(!ixgbe_page_is_reserved(page)))
 			return true;
 
 		/* this page cannot be reused so discard it */
-		put_page(page);
+		__free_pages(page, ixgbe_rx_pg_order(rx_ring));
 		return false;
 	}
 
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 			rx_buffer->page_offset, size, truesize);
 
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_node_id()))
+	if (unlikely(ixgbe_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
@@ -1867,22 +1867,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= truesize;
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
 	if (rx_buffer->page_offset > last_offset)
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
 
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
 
@@ -1945,6 +1942,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 					      rx_buffer->page_offset,
 					      ixgbe_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
+
+		rx_buffer->skb = NULL;
 	}
 
 	/* pull page into skb */
@@ -1962,8 +1961,6 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 	}
 
 	/* clear contents of buffer_info */
-	rx_buffer->skb = NULL;
-	rx_buffer->dma = 0;
 	rx_buffer->page = NULL;
 
 	return skb;
@@ -4344,29 +4341,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
 	/* Free all the Rx ring sk_buffs */
 	for (i = 0; i < rx_ring->count; i++) {
-		struct ixgbe_rx_buffer *rx_buffer;
+		struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-		rx_buffer = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
-			if (IXGBE_CB(skb)->page_released) {
+			if (IXGBE_CB(skb)->page_released)
 				dma_unmap_page(dev,
 					       IXGBE_CB(skb)->dma,
 					       ixgbe_rx_bufsz(rx_ring),
 					       DMA_FROM_DEVICE);
-				IXGBE_CB(skb)->page_released = false;
-			}
 			dev_kfree_skb(skb);
 			rx_buffer->skb = NULL;
 		}
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       ixgbe_rx_pg_size(rx_ring),
-				       DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
-		if (rx_buffer->page)
-			__free_pages(rx_buffer->page,
-				     ixgbe_rx_pg_order(rx_ring));
+
+		if (!rx_buffer->page)
+			continue;
+
+		dma_unmap_page(dev, rx_buffer->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		__free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
 		rx_buffer->page = NULL;
 	}
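
On systems where PAGE_SIZE < 8192 the driver carves each receive page into two buffers and alternates between the halves by XOR-ing the offset with the truesize; after this patch the refcount bump (atomic_inc(&page->_count)) sits after the #if/#else so it runs for both page-size cases. The stand-alone sketch below uses hypothetical constants rather than driver code and only illustrates the offset-flip arithmetic:

#include <assert.h>
#include <stdio.h>

/* Hypothetical values for illustration: a 4 KiB page split into two
 * 2 KiB receive buffers, as the driver does when PAGE_SIZE < 8192.
 */
#define EXAMPLE_PAGE_SIZE 4096u
#define EXAMPLE_TRUESIZE  2048u

int main(void)
{
	unsigned int page_offset = 0;

	/* Each time one half is handed up the stack, flip to the other
	 * half of the same page for the next receive.
	 */
	for (int i = 0; i < 4; i++) {
		printf("buffer %d uses offset %u\n", i, page_offset);
		page_offset ^= EXAMPLE_TRUESIZE;	/* 0 -> 2048 -> 0 -> ... */
	}

	assert(page_offset == 0);	/* an even number of flips lands back at 0 */
	return 0;
}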
