Skip to content

Commit de78d1f

Browse files
Alexander Duyck authored and Jeff Kirsher committed
igb: Lock buffer size at 2K even on systems with larger pages
This change locks us in at 2K buffers even on a system that supports larger frames. The reason for this change is to make better use of pages and to reduce the overall truesize of frames generated by igb.

Signed-off-by: Alexander Duyck <[email protected]>
Tested-by: Aaron Brown <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent 2e334ee commit de78d1f

File tree

3 files changed

+23
-15
lines changed

3 files changed

+23
-15
lines changed

drivers/net/ethernet/intel/igb/igb.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,10 @@ struct vf_data_storage {
132132
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
133133

134134
/* Supported Rx Buffer Sizes */
135-
#define IGB_RXBUFFER_256 256
136-
#define IGB_RXBUFFER_16384 16384
137-
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
135+
#define IGB_RXBUFFER_256 256
136+
#define IGB_RXBUFFER_2048 2048
137+
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
138+
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
138139

139140
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
140141
#define IGB_TX_QUEUE_WAKE 16

drivers/net/ethernet/intel/igb/igb_ethtool.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1727,7 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
17271727
/* sync Rx buffer for CPU read */
17281728
dma_sync_single_for_cpu(rx_ring->dev,
17291729
rx_buffer_info->dma,
1730-
PAGE_SIZE / 2,
1730+
IGB_RX_BUFSZ,
17311731
DMA_FROM_DEVICE);
17321732

17331733
/* verify contents of skb */
@@ -1737,7 +1737,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
17371737
/* sync Rx buffer for device write */
17381738
dma_sync_single_for_device(rx_ring->dev,
17391739
rx_buffer_info->dma,
1740-
PAGE_SIZE / 2,
1740+
IGB_RX_BUFSZ,
17411741
DMA_FROM_DEVICE);
17421742

17431743
/* unmap buffer on tx side */

drivers/net/ethernet/intel/igb/igb_main.c

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter)
554554
16, 1,
555555
page_address(buffer_info->page) +
556556
buffer_info->page_offset,
557-
PAGE_SIZE/2, true);
557+
IGB_RX_BUFSZ, true);
558558
}
559559
}
560560
}
@@ -3103,11 +3103,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
31033103

31043104
/* set descriptor configuration */
31053105
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3106-
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3107-
srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3108-
#else
3109-
srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3110-
#endif
3106+
srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
31113107
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
31123108
#ifdef CONFIG_IGB_PTP
31133109
if (hw->mac.type >= e1000_82580)
@@ -5855,7 +5851,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
58555851
/* sync the buffer for use by the device */
58565852
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
58575853
old_buff->page_offset,
5858-
PAGE_SIZE / 2,
5854+
IGB_RX_BUFSZ,
58595855
DMA_FROM_DEVICE);
58605856
}
58615857

@@ -5905,25 +5901,36 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
59055901
}
59065902

59075903
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
5908-
rx_buffer->page_offset, size, PAGE_SIZE / 2);
5904+
rx_buffer->page_offset, size, IGB_RX_BUFSZ);
59095905

59105906
/* avoid re-using remote pages */
59115907
if (unlikely(page_to_nid(page) != numa_node_id()))
59125908
return false;
59135909

5910+
#if (PAGE_SIZE < 8192)
59145911
/* if we are only owner of page we can reuse it */
59155912
if (unlikely(page_count(page) != 1))
59165913
return false;
59175914

59185915
/* flip page offset to other buffer */
5919-
rx_buffer->page_offset ^= PAGE_SIZE / 2;
5916+
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
59205917

59215918
/*
59225919
* since we are the only owner of the page and we need to
59235920
* increment it, just set the value to 2 in order to avoid
59245921
* an unnecessary locked operation
59255922
*/
59265923
atomic_set(&page->_count, 2);
5924+
#else
5925+
/* move offset up to the next cache line */
5926+
rx_buffer->page_offset += SKB_DATA_ALIGN(size);
5927+
5928+
if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
5929+
return false;
5930+
5931+
/* bump ref count on page before it is given to the stack */
5932+
get_page(page);
5933+
#endif
59275934

59285935
return true;
59295936
}
@@ -5977,7 +5984,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
59775984
dma_sync_single_range_for_cpu(rx_ring->dev,
59785985
rx_buffer->dma,
59795986
rx_buffer->page_offset,
5980-
PAGE_SIZE / 2,
5987+
IGB_RX_BUFSZ,
59815988
DMA_FROM_DEVICE);
59825989

59835990
/* pull page into skb */

0 commit comments

Comments (0)