Commit 8649aae

Alexander Duyck authored and Jeff Kirsher committed
igb: Add support for using order 1 pages to receive large frames
This patch adds support for using 3K buffers in order 1 pages the same way we were using 2K buffers in 4K pages. We are reserving 1K of room for now to have space available for future headroom and tailroom when we enable build_skb support.

One side effect of this patch is that we can end up using a larger buffer if jumbo frames are enabled. The impact shouldn't be too great, but it could hurt small-packet performance for UDP workloads if jumbo frames are enabled, as the truesize of frames will be larger.

Signed-off-by: Alexander Duyck <[email protected]>
Tested-by: Aaron Brown <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent e089129 commit 8649aae
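
The sizing math is easy to check outside the kernel. Here is a minimal user-space sketch, not driver code: a 4K base page is assumed, and the macro names simply mirror the patch. It shows the split the commit message describes.

/* sizing_sketch.c - standalone illustration of the 2K/4K vs 3K/8K split;
 * assumes a 4K base page, as on x86. Build: cc sizing_sketch.c */
#include <stdio.h>

#define PAGE_SIZE         4096u
#define IGB_RXBUFFER_2048 2048u
#define IGB_RXBUFFER_3072 3072u

int main(void)
{
	unsigned int order0 = PAGE_SIZE << 0;	/* 4K page, two 2K buffers */
	unsigned int order1 = PAGE_SIZE << 1;	/* 8K page, two 3K buffers */

	printf("order 0: %u-byte page, 2 x %u, %u bytes slack\n",
	       order0, IGB_RXBUFFER_2048, order0 - 2 * IGB_RXBUFFER_2048);
	printf("order 1: %u-byte page, 2 x %u, %u bytes slack\n",
	       order1, IGB_RXBUFFER_3072, order1 - 2 * IGB_RXBUFFER_3072);
	return 0;
}

Running it shows 0 bytes of slack in the order-0 case and 2048 bytes (1K per half-page buffer) in the order-1 case, which is the headroom/tailroom reserve the message refers to.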

2 files changed, +76 -18 lines changed

drivers/net/ethernet/intel/igb/igb.h

Lines changed: 29 additions & 1 deletion
@@ -142,9 +142,9 @@ struct vf_data_storage {
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256	256
 #define IGB_RXBUFFER_2048	2048
+#define IGB_RXBUFFER_3072	3072
 #define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN		16
-#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
 
 #define IGB_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
 #if (PAGE_SIZE < 8192)
@@ -313,12 +313,40 @@ struct igb_q_vector {
 };
 
 enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_3K_BUFFER,
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGB_RING_FLAG_TX_CTX_IDX,
 	IGB_RING_FLAG_TX_DETECT_HANG
 };
 
+#define ring_uses_large_buffer(ring) \
+	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define set_ring_uses_large_buffer(ring) \
+	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+#define clear_ring_uses_large_buffer(ring) \
+	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
+
+static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return IGB_RXBUFFER_3072;
+#endif
+	return IGB_RXBUFFER_2048;
+}
+
+static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_large_buffer(ring))
+		return 1;
+#endif
+	return 0;
+}
+
+#define igb_rx_pg_size(_ring)	(PAGE_SIZE << igb_rx_pg_order(_ring))
+
 #define IGB_TXD_DCMD	(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
 #define IGB_RX_DESC(R, i)	\
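
On PAGE_SIZE >= 8192 builds, the #if branches above compile out and the helpers always report 2K buffers in order-0 pages. A rough user-space mock of how the one ring flag drives both helpers; test_bit() is replaced with a plain bit test and struct igb_ring is reduced to its flags word, so this is illustrative, not the kernel API:

/* helpers_mock.c - user-space mock of igb_rx_bufsz()/igb_rx_pg_order();
 * PAGE_SIZE of 4096 is assumed. Build: cc helpers_mock.c */
#include <stdio.h>

#define PAGE_SIZE		4096u
#define IGB_RXBUFFER_2048	2048u
#define IGB_RXBUFFER_3072	3072u
#define IGB_RING_FLAG_RX_3K_BUFFER 0	/* mocked bit position */

struct igb_ring { unsigned long flags; };

static int ring_uses_large_buffer(struct igb_ring *ring)
{
	return !!(ring->flags & (1UL << IGB_RING_FLAG_RX_3K_BUFFER));
}

static unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
	return ring_uses_large_buffer(ring) ? IGB_RXBUFFER_3072
					    : IGB_RXBUFFER_2048;
}

static unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
	return ring_uses_large_buffer(ring) ? 1 : 0;
}

int main(void)
{
	struct igb_ring small = { 0 };
	struct igb_ring large = { 1UL << IGB_RING_FLAG_RX_3K_BUFFER };

	printf("small: bufsz %u, page %lu\n", igb_rx_bufsz(&small),
	       (unsigned long)PAGE_SIZE << igb_rx_pg_order(&small));
	printf("large: bufsz %u, page %lu\n", igb_rx_bufsz(&large),
	       (unsigned long)PAGE_SIZE << igb_rx_pg_order(&large));
	return 0;
}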

drivers/net/ethernet/intel/igb/igb_main.c

Lines changed: 47 additions & 17 deletions
@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter)
 					  16, 1,
 					  page_address(buffer_info->page) +
 						       buffer_info->page_offset,
-					  IGB_RX_BUFSZ, true);
+					  igb_rx_bufsz(rx_ring), true);
 			}
 		}
 	}
@@ -3746,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
 	/* set descriptor configuration */
 	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	if (hw->mac.type >= e1000_82580)
 		srrctl |= E1000_SRRCTL_TIMESTAMP;
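
The BSIZEPKT field of SRRCTL encodes the packet buffer size in 1 KB units, which is why the byte counts are shifted right before being programmed. The shift value of 10 used below matches E1000_SRRCTL_BSIZEPKT_SHIFT in the igb headers, restated here as an assumption for a standalone check:

/* srrctl_sketch.c - SRRCTL's BSIZEPKT field counts buffer size in 1KB
 * units; the shift of 10 is assumed from the igb headers. */
#include <stdio.h>

#define E1000_SRRCTL_BSIZEPKT_SHIFT 10	/* assumed header value */

int main(void)
{
	unsigned int sizes[] = { 2048, 3072 };

	for (int i = 0; i < 2; i++)
		printf("%u bytes -> BSIZEPKT = %u\n", sizes[i],
		       sizes[i] >> E1000_SRRCTL_BSIZEPKT_SHIFT);
	return 0;
}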
@@ -3776,6 +3779,23 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+				  struct igb_ring *rx_ring)
+{
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IGB_FLAG_RX_LEGACY)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  *  igb_configure_rx - Configure receive Unit after Reset
  *  @adapter: board private structure
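
The decision above is small enough to restate as a pure function. In this sketch the IGB_FLAG_RX_LEGACY bit position and the IGB_MAX_FRAME_BUILD_SKB threshold are mocked values, not the driver's definitions; the point is only the shape of the policy: legacy receive always keeps 2K buffers, and on 4K-page systems a ring moves to 3K buffers only when the max frame exceeds the build_skb budget.

/* bufsz_policy.c - mock of the igb_set_rx_buffer_len() decision;
 * the flag bit and the 1536-byte threshold are assumed values. */
#include <stdbool.h>
#include <stdio.h>

#define IGB_FLAG_RX_LEGACY      (1u << 0)	/* mocked bit position */
#define IGB_MAX_FRAME_BUILD_SKB 1536u		/* mocked threshold */

static bool wants_large_buffer(unsigned int flags, unsigned int max_frame)
{
	if (flags & IGB_FLAG_RX_LEGACY)
		return false;	/* legacy-rx option: stay on 2K buffers */
	return max_frame > IGB_MAX_FRAME_BUILD_SKB;	/* jumbo -> 3K */
}

int main(void)
{
	printf("std MTU: %d\n", wants_large_buffer(0, 1514));
	printf("jumbo:   %d\n", wants_large_buffer(0, 9216));
	printf("legacy:  %d\n", wants_large_buffer(IGB_FLAG_RX_LEGACY, 9216));
	return 0;
}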
@@ -3793,8 +3813,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+		igb_set_rx_buffer_len(adapter, rx_ring);
+		igb_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 /**
@@ -3969,13 +3993,13 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      buffer_info->dma,
 					      buffer_info->page_offset,
-					      IGB_RX_BUFSZ,
+					      igb_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
 		dma_unmap_page_attrs(rx_ring->dev,
 				     buffer_info->dma,
-				     PAGE_SIZE,
+				     igb_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IGB_RX_DMA_ATTR);
 		__page_frag_cache_drain(buffer_info->page,
@@ -6870,7 +6894,7 @@ static inline bool igb_page_is_reserved(struct page *page)
 
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
-				  unsigned int truesize)
+				  const unsigned int truesize)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
 
@@ -6884,12 +6908,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
 		return false;
 #endif
 
@@ -6929,7 +6955,7 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IGB_RX_BUFSZ;
+	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
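
Two details above work together. On PAGE_SIZE < 8192 systems, truesize becomes half the ring's page size (4096 on an order-1 page, even though hardware only ever receives 3072 of it; the remaining 1K is the reserved head/tailroom, and this is the larger truesize the commit message warns about), and igb_can_reuse_rx_page() flips between the two halves by XOR-ing the offset with that same truesize, which is why the fixed IGB_RX_BUFSZ constant had to go. A tiny demonstration of the toggle:

/* flip_sketch.c - half-page buffer flipping via XOR, as in
 * igb_can_reuse_rx_page(); truesize is half the page size here. */
#include <stdio.h>

static void flip(unsigned int truesize)
{
	unsigned int offset = 0;

	offset ^= truesize;	/* first half -> second half */
	printf("truesize %u: 0 -> %u", truesize, offset);
	offset ^= truesize;	/* second half -> back to first */
	printf(" -> %u\n", offset);
}

int main(void)
{
	flip(2048);	/* order-0: 4K page, two 2K halves */
	flip(4096);	/* order-1: 8K page, two 4K halves */
	return 0;
}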
@@ -7025,7 +7051,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		 * any references we are holding to it
 		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
 				     IGB_RX_DMA_ATTR);
 		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
@@ -7278,21 +7304,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
 	}
 
 	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE, IGB_RX_DMA_ATTR);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 igb_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IGB_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, igb_rx_pg_order(rx_ring));
 
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
@@ -7315,6 +7343,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
+	u16 bufsz;
 
 	/* nothing to do */
 	if (!cleaned_count)
@@ -7324,14 +7353,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;
 
+	bufsz = igb_rx_bufsz(rx_ring);
+
 	do {
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 IGB_RX_BUFSZ,
+						 bi->page_offset, bufsz,
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if buffer_addrs didn't change
