
Commit f15c5ba

etantilov authored and Jeff Kirsher committed
ixgbevf: add support for using order 1 pages to receive large frames
Based on commit 8649aae ("igb: Add support for using order 1 pages to receive large frames").

Add support for using 3K buffers in an order 1 page. We are reserving 1K for now to have space available for future tail room and head room when we enable build_skb support.

Signed-off-by: Emil Tantilov <[email protected]>
Tested-by: Krishneil Singh <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent bc04347 commit f15c5ba
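
The arithmetic behind that 1K reservation, as a standalone sketch (not part of the commit), assuming a 4 KB-page architecture; PAGE_SIZE below is a local stand-in for the kernel macro:

#include <stdio.h>

#define PAGE_SIZE		4096	/* stand-in; assumes a 4 KB-page arch */
#define IXGBEVF_RXBUFFER_3072	3072

int main(void)
{
	unsigned int pg_size  = PAGE_SIZE << 1;		/* order-1 page: 8192 */
	unsigned int truesize = pg_size / 2;		/* one buffer half: 4096 */
	unsigned int bufsz    = IXGBEVF_RXBUFFER_3072;
	unsigned int reserved = truesize - bufsz;	/* head/tail room: 1024 */

	/* prints: page 8192, half 4096, buffer 3072, reserved 1024 */
	printf("page %u, half %u, buffer %u, reserved %u\n",
	       pg_size, truesize, bufsz, reserved);
	return 0;
}

On PAGE_SIZE >= 8192 builds none of this applies; as the #else branches in the diff show, the driver then stays on order-0 pages with cache-line-aligned truesize.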

File tree: 2 files changed, +92 −24 lines

drivers/net/ethernet/intel/ixgbevf/ixgbevf.h

Lines changed: 44 additions & 8 deletions
@@ -89,17 +89,11 @@ struct ixgbevf_rx_queue_stats {
 };
 
 enum ixgbevf_ring_state_t {
+	__IXGBEVF_RX_3K_BUFFER,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
 };
 
-#define check_for_tx_hang(ring) \
-	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
-	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
-	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-
 struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
@@ -156,12 +150,20 @@ struct ixgbevf_ring {
 /* Supported Rx Buffer Sizes */
 #define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
 #define IXGBEVF_RXBUFFER_2048	2048
+#define IXGBEVF_RXBUFFER_3072	3072
 
 #define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
+#define IXGBEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB IXGBEVF_RXBUFFER_2048
+#endif
+
 #define IXGBE_TX_FLAGS_CSUM		BIT(0)
 #define IXGBE_TX_FLAGS_VLAN		BIT(1)
 #define IXGBE_TX_FLAGS_TSO		BIT(2)
@@ -170,6 +172,40 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IXGBEVF_RXBUFFER_3072;
+#endif
+	return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
 struct ixgbevf_ring_container {
 	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
 	unsigned int total_bytes;	/* total bytes processed this int */
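
To see what the new helpers resolve to, a minimal userspace mock (not driver code), assuming a 4 KB-page build; the __IXGBEVF_RX_3K_BUFFER state bit is modeled here as a plain bool rather than the kernel's atomic bitmap:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096	/* stand-in; assumes a 4 KB-page arch */
#define IXGBEVF_RXBUFFER_2048	2048
#define IXGBEVF_RXBUFFER_3072	3072

/* one bool stands in for the __IXGBEVF_RX_3K_BUFFER ring-state bit */
struct mock_ring { bool large_buffer; };

static unsigned int mock_rx_bufsz(const struct mock_ring *ring)
{
	return ring->large_buffer ? IXGBEVF_RXBUFFER_3072
				  : IXGBEVF_RXBUFFER_2048;
}

static unsigned int mock_rx_pg_order(const struct mock_ring *ring)
{
	return ring->large_buffer ? 1 : 0;
}

int main(void)
{
	struct mock_ring normal = { .large_buffer = false };
	struct mock_ring jumbo  = { .large_buffer = true };

	/* normal ring: 2048-byte buffers out of an order-0 (4096-byte) page */
	printf("%u from %u\n", mock_rx_bufsz(&normal),
	       (unsigned int)(PAGE_SIZE << mock_rx_pg_order(&normal)));
	/* large ring: 3072-byte buffers out of an order-1 (8192-byte) page */
	printf("%u from %u\n", mock_rx_bufsz(&jumbo),
	       (unsigned int)(PAGE_SIZE << mock_rx_pg_order(&jumbo)));
	return 0;
}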

drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

Lines changed: 48 additions & 16 deletions
@@ -565,21 +565,22 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbevf_rx_pg_size(rx_ring),
 				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
 	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
@@ -621,7 +622,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 						 bi->page_offset,
-						 IXGBEVF_RX_BUFSZ,
+						 ixgbevf_rx_bufsz(rx_ring),
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if pkt_addr didn't change
@@ -750,13 +751,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+#define IXGBEVF_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
+
+	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
 		return false;
 
 #endif
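
The page_offset ^= truesize line above relies on the buffer being exactly half of the (possibly order-1) page: XOR-ing by the half size toggles between the two halves on every reuse. A standalone sketch of that flip, assuming a large-buffer ring on a 4 KB-page build:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 8192 / 2;	/* half of an order-1 page */
	unsigned int page_offset = 0;

	/* each reuse flips to the other half: 0, 4096, 0, 4096 */
	for (int i = 0; i < 4; i++) {
		printf("offset %u\n", page_offset);
		page_offset ^= truesize;
	}
	return 0;
}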
@@ -797,7 +801,7 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IXGBEVF_RX_BUFSZ;
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
@@ -888,8 +892,8 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		 * any references we are holding to it
 		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
-				     IXGBEVF_RX_DMA_ATTR);
+				     ixgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
@@ -1586,15 +1590,19 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 
-static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring, int index)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
 	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
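
The SRRCTL packet-buffer-size field counts 1 KB units. A small sketch of the encoding, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 as the ixgbe-family headers define it:

#include <stdio.h>

/* assumed value, matching the ixgbe-family register headers */
#define IXGBE_SRRCTL_BSIZEPKT_SHIFT	10

int main(void)
{
	/* 3072 >> 10 = 3 and 2048 >> 10 = 2: the field counts 1 KB units */
	printf("3072 -> %u\n", 3072u >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);
	printf("2048 -> %u\n", 2048u >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}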
@@ -1766,7 +1774,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ring->next_to_use = 0;
 	ring->next_to_alloc = 0;
 
-	ixgbevf_configure_srrctl(adapter, reg_idx);
+	ixgbevf_configure_srrctl(adapter, ring, reg_idx);
 
 	/* allow any size packet since we can handle overflow */
 	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
@@ -1778,6 +1786,26 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *rx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
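
ixgbevf_set_rx_buffer_len() opts a ring into 3K buffers only when the worst-case frame no longer fits the build_skb budget of a 2K buffer. A sketch of that decision with stand-in values; the real IXGBEVF_MAX_FRAME_BUILD_SKB depends on SKB_WITH_OVERHEAD(), NET_SKB_PAD and NET_IP_ALIGN for the build, so the 1728 below is hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN		14
#define ETH_FCS_LEN		4
#define MAX_FRAME_BUILD_SKB	1728	/* hypothetical, build-dependent */

static bool wants_large_buffer(unsigned int mtu)
{
	unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	return max_frame > MAX_FRAME_BUILD_SKB;
}

int main(void)
{
	printf("mtu 1500 -> %d\n", wants_large_buffer(1500)); /* 0: 2K buffers */
	printf("mtu 3000 -> %d\n", wants_large_buffer(3000)); /* 1: 3K buffers */
	return 0;
}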
@@ -1805,8 +1833,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+		ixgbevf_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -2135,13 +2167,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_buffer->dma,
 					      rx_buffer->page_offset,
-					      IXGBEVF_RX_BUFSZ,
+					      ixgbevf_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
 		dma_unmap_page_attrs(rx_ring->dev,
 				     rx_buffer->dma,
-				     PAGE_SIZE,
+				     ixgbevf_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBEVF_RX_DMA_ATTR);