@@ -565,21 +565,22 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 		return true;
 
 	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
 	if (unlikely(!page)) {
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
 
 	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 ixgbevf_rx_pg_size(rx_ring),
 				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, ixgbevf_rx_pg_order(rx_ring));
 
 		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
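
The ixgbevf_rx_pg_order()/ixgbevf_rx_pg_size()/ixgbevf_rx_bufsz() helpers this hunk relies on are not part of the hunk itself; they would be added to ixgbevf.h by the same series. A minimal sketch of what they plausibly look like, assuming a ring_uses_large_buffer() flag accessor (names and bodies are an approximation, not the committed source):

/* Sketch of the helpers assumed by these hunks. On 4K-page systems a
 * ring flagged for large buffers gets an order-1 (8K) page split into
 * two halves with a 3K usable buffer size; otherwise order-0 pages
 * split into 2K halves are used, as before.
 */
static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IXGBEVF_RXBUFFER_3072;
#endif
	return IXGBEVF_RXBUFFER_2048;
}

static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;	/* order-1: one 8K compound page */
#endif
	return 0;		/* order-0: a single 4K page */
}

#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))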
@@ -621,7 +622,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 						 bi->page_offset,
-						 IXGBEVF_RX_BUFSZ,
+						 ixgbevf_rx_bufsz(rx_ring),
 						 DMA_FROM_DEVICE);
 
 		/* Refresh the desc even if pkt_addr didn't change
@@ -750,13 +751,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 		return false;
 
 	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+#define IXGBEVF_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
+
+	if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
 		return false;
 
 #endif
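
On systems with PAGE_SIZE < 8192 each page is split in half and reused by XOR-flipping the offset, with truesize being half the (possibly order-1) page. A standalone, compilable sketch of just that arithmetic (plain userspace C, not driver code):

/* Models the half-page flip used above. With an order-0 4K page,
 * truesize is 2048; with an order-1 8K "large buffer" page it is 4096.
 * XORing page_offset with truesize ping-pongs between the two halves.
 */
#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048;	/* half of an order-0 4K page */
	unsigned int page_offset = 0;

	for (int i = 0; i < 4; i++) {
		printf("buffer %d at offset %u\n", i, page_offset);
		page_offset ^= truesize;	/* flip to the other half */
	}
	return 0;	/* prints offsets 0, 2048, 0, 2048 */
}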
@@ -797,7 +801,7 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IXGBEVF_RX_BUFSZ;
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
@@ -888,8 +892,8 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		 * any references we are holding to it
 		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
-				     IXGBEVF_RX_DMA_ATTR);
+				     ixgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
@@ -1586,15 +1590,19 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
 
 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2
 
-static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
+				     struct ixgbevf_ring *ring, int index)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 srrctl;
 
 	srrctl = IXGBE_SRRCTL_DROP_EN;
 
 	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
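
The BSIZEPKT field of SRRCTL encodes the packet buffer size in 1 KB units, which is what the right shift produces. A self-contained sketch of the encoding, assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 as in the other ixgbe-family drivers:

/* Standalone demonstration of the SRRCTL buffer-size encoding
 * (userspace C; the shift value of 10 is an assumption here).
 */
#include <stdio.h>

#define IXGBE_SRRCTL_BSIZEPKT_SHIFT	10	/* field is in 1KB units */
#define IXGBEVF_RXBUFFER_2048		2048
#define IXGBEVF_RXBUFFER_3072		3072

int main(void)
{
	printf("2048-byte buffer -> BSIZEPKT %d\n",
	       IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);	/* 2 */
	printf("3072-byte buffer -> BSIZEPKT %d\n",
	       IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT);	/* 3 */
	return 0;
}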
@@ -1766,7 +1774,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ring->next_to_use = 0;
 	ring->next_to_alloc = 0;
 
-	ixgbevf_configure_srrctl(adapter, reg_idx);
+	ixgbevf_configure_srrctl(adapter, ring, reg_idx);
 
 	/* allow any size packet since we can handle overflow */
 	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
@@ -1778,6 +1786,26 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+				      struct ixgbevf_ring *rx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
  * @adapter: board private structure
@@ -1805,8 +1833,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring
 	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+		ixgbevf_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
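
ixgbevf_set_rx_buffer_len() derives the worst-case frame size from the MTU plus Ethernet header and FCS, and flags the ring for large (order-1) buffers only when that frame exceeds the build_skb budget on a 4K-page system. A standalone model of that decision (the 1536-byte threshold below is a placeholder; the real IXGBEVF_MAX_FRAME_BUILD_SKB value lives in ixgbevf.h):

/* Userspace model of the large-buffer decision; not driver code. */
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN		14	/* Ethernet header */
#define ETH_FCS_LEN		4	/* frame check sequence */
#define MAX_FRAME_BUILD_SKB	1536	/* placeholder threshold */

static bool wants_large_buffer(unsigned int mtu, bool legacy_rx)
{
	unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

	if (legacy_rx)
		return false;	/* legacy Rx path never upsizes */
	return max_frame > MAX_FRAME_BUILD_SKB;
}

int main(void)
{
	/* MTU 1500 -> 1518-byte frame, fits: 0. MTU 9000 -> 9018, needs 3K: 1 */
	printf("MTU 1500: large buffer? %d\n", wants_large_buffer(1500, false));
	printf("MTU 9000: large buffer? %d\n", wants_large_buffer(9000, false));
	return 0;
}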
@@ -2135,13 +2167,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      rx_buffer->dma,
 					      rx_buffer->page_offset,
-					      IXGBEVF_RX_BUFSZ,
+					      ixgbevf_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
 
 		/* free resources associated with mapping */
 		dma_unmap_page_attrs(rx_ring->dev,
 				     rx_buffer->dma,
-				     PAGE_SIZE,
+				     ixgbevf_rx_pg_size(rx_ring),
 				     DMA_FROM_DEVICE,
 				     IXGBEVF_RX_DMA_ATTR);
 