@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter)
					  16, 1,
					  page_address(buffer_info->page) +
						  buffer_info->page_offset,
-					  IGB_RX_BUFSZ, true);
+					  igb_rx_bufsz(rx_ring), true);
			}
		}
	}
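
Note: igb_rx_bufsz() above, and the igb_rx_pg_size()/igb_rx_pg_order() calls in the later hunks, come from igb.h, which is not part of this diff. A minimal sketch of how these helpers are presumably defined, assuming the ring carries a "large buffer" flag (names and exact bodies are assumptions, not confirmed by these hunks):

/* sketch only: per-ring Rx buffer geometry -- 3K buffers on order-1 pages
 * when the large-buffer flag is set, otherwise 2K buffers on order-0 pages
 */
static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGB_RXBUFFER_3072;
#endif
	return IGB_RXBUFFER_2048;
}

static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;	/* order-1: two contiguous pages */
#endif
	return 0;
}

#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))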
@@ -3746,7 +3746,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	if (ring_uses_large_buffer(ring))
+		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+	else
+		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
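
For reference, SRRCTL expresses the packet buffer size in 1 KB units; assuming the usual E1000_SRRCTL_BSIZEPKT_SHIFT value of 10, the two branches work out to:

	IGB_RXBUFFER_2048 >> 10 = 2   /* SRRCTL.BSIZEPKT = 2 -> 2 KB buffer */
	IGB_RXBUFFER_3072 >> 10 = 3   /* SRRCTL.BSIZEPKT = 3 -> 3 KB buffer */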
@@ -3776,6 +3779,23 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
 }
 
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+				  struct igb_ring *rx_ring)
+{
+	/* set build_skb and buffer size flags */
+	clear_ring_uses_large_buffer(rx_ring);
+
+	if (adapter->flags & IGB_FLAG_RX_LEGACY)
+		return;
+
+#if (PAGE_SIZE < 8192)
+	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
+		return;
+
+	set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
 /**
  *  igb_configure_rx - Configure receive Unit after Reset
  *  @adapter: board private structure
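
igb_set_rx_buffer_len() leans on ring-flag helpers and the IGB_MAX_FRAME_BUILD_SKB cutoff, both defined in igb.h outside this diff. A plausible sketch, assuming the flag is a bit in ring->flags (flag name and cutoff formula are assumptions for illustration):

/* sketch only: one ring flag selects the 3K-buffer / order-1 page path */
#define ring_uses_large_buffer(ring) \
	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

/* assumed cutoff: largest frame that still fits a 2K half-page buffer
 * once skb_shared_info overhead and head padding are accounted for
 */
#define IGB_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)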
@@ -3793,8 +3813,12 @@ static void igb_configure_rx(struct igb_adapter *adapter)
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *rx_ring = adapter->rx_ring[i];
+
+		igb_set_rx_buffer_len(adapter, rx_ring);
+		igb_configure_rx_ring(adapter, rx_ring);
+	}
 }
 
 /**
@@ -3969,13 +3993,13 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
-					      IGB_RX_BUFSZ,
+					      igb_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);
 
		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
-				     PAGE_SIZE,
+				     igb_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
@@ -6870,7 +6894,7 @@ static inline bool igb_page_is_reserved(struct page *page)
 
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
				  struct page *page,
-				  unsigned int truesize)
+				  const unsigned int truesize)
 {
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
 
@@ -6884,12 +6908,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
		return false;
 
	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+	rx_buffer->page_offset ^= truesize;
 #else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;
+#define IGB_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)
 
-	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
		return false;
 #endif
 
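
The #else branch covers large-page (PAGE_SIZE >= 8 KiB) systems, where the offset walks forward through the page rather than flipping. SKB_WITH_OVERHEAD() is the stock <linux/skbuff.h> helper, so the new reuse limit expands to roughly:

	IGB_LAST_OFFSET = SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048
	                = PAGE_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - 2048

i.e. reuse stops once another full 2K buffer plus skb_shared_info no longer fits, slightly stricter than the old PAGE_SIZE - IGB_RX_BUFSZ check.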
@@ -6929,7 +6955,7 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
	struct page *page = rx_buffer->page;
	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = IGB_RX_BUFSZ;
+	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
 #else
	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
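
On small-page systems the receive page is always split into two equal halves, so truesize is simply igb_rx_pg_size(rx_ring) / 2: 4096 / 2 = 2048 for order-0 pages and 8192 / 2 = 4096 for order-1 pages, which matches the page_offset ^= truesize flip in igb_can_reuse_rx_page() above.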
@@ -7025,7 +7051,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
	}
@@ -7278,21 +7304,23 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
		return true;
 
	/* alloc new page for storage */
-	page = dev_alloc_page();
+	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}
 
	/* map page for use */
-	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
-				 DMA_FROM_DEVICE, IGB_RX_DMA_ATTR);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 igb_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 IGB_RX_DMA_ATTR);
 
	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
-		__free_page(page);
+		__free_pages(page, igb_rx_pg_order(rx_ring));
 
		rx_ring->rx_stats.alloc_failed++;
		return false;
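
dev_alloc_pages(order) returns a compound allocation of 2^order pages (dev_alloc_page() is just the order-0 form), so with 4 KiB base pages igb_rx_pg_size() works out to PAGE_SIZE << 0 = 4096 in the 2K-buffer case and PAGE_SIZE << 1 = 8192 in the 3K-buffer case. The DMA map here, the unmap paths in igb_clean_rx_ring() and igb_fetch_rx_buffer(), and __free_pages() all have to agree on that same size/order, which is what these hunks keep in sync.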
@@ -7315,6 +7343,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
+	u16 bufsz;
 
	/* nothing to do */
	if (!cleaned_count)
@@ -7324,14 +7353,15 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;
 
+	bufsz = igb_rx_bufsz(rx_ring);
+
	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;
 
		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 IGB_RX_BUFSZ,
+						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);
 
		/* Refresh the desc even if buffer_addrs didn't change
0 commit comments