@@ -1436,20 +1436,17 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
         struct page *page = bi->page;
-        dma_addr_t dma = bi->dma;
+        dma_addr_t dma;
 
         /* since we are recycling buffers we should seldom need to alloc */
-        if (likely(dma))
+        if (likely(page))
                 return true;
 
         /* alloc new page for storage */
-        if (likely(!page)) {
-                page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
-                if (unlikely(!page)) {
-                        rx_ring->rx_stats.alloc_rx_page_failed++;
-                        return false;
-                }
-                bi->page = page;
+        page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+        if (unlikely(!page)) {
+                rx_ring->rx_stats.alloc_rx_page_failed++;
+                return false;
         }
 
         /* map page for use */
@@ -1462,13 +1459,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
          */
         if (dma_mapping_error(rx_ring->dev, dma)) {
                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
-                bi->page = NULL;
 
                 rx_ring->rx_stats.alloc_rx_page_failed++;
                 return false;
         }
 
         bi->dma = dma;
+        bi->page = page;
         bi->page_offset = 0;
 
         return true;
@@ -1512,8 +1509,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                         i -= rx_ring->count;
                 }
 
-                /* clear the hdr_addr for the next_to_use descriptor */
-                rx_desc->read.hdr_addr = 0;
+                /* clear the status bits for the next_to_use descriptor */
+                rx_desc->wb.upper.status_error = 0;
 
                 cleaned_count--;
         } while (cleaned_count);
@@ -1798,9 +1795,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
         /* transfer page from old buffer to new buffer */
-        new_buff->page = old_buff->page;
-        new_buff->dma = old_buff->dma;
-        new_buff->page_offset = old_buff->page_offset;
+        *new_buff = *old_buff;
 
         /* sync the buffer for use by the device */
         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1809,6 +1804,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                          DMA_FROM_DEVICE);
 }
 
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1844,20 +1844,20 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-                /* we can reuse buffer as-is, just make sure it is local */
-                if (likely(page_to_nid(page) == numa_node_id()))
+                /* page is not reserved, we can reuse buffer as-is */
+                if (likely(!ixgbe_page_is_reserved(page)))
                         return true;
 
                 /* this page cannot be reused so discard it */
-                put_page(page);
+                __free_pages(page, ixgbe_rx_pg_order(rx_ring));
                 return false;
         }
 
         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                         rx_buffer->page_offset, size, truesize);
 
         /* avoid re-using remote pages */
-        if (unlikely(page_to_nid(page) != numa_node_id()))
+        if (unlikely(ixgbe_page_is_reserved(page)))
                 return false;
 
 #if (PAGE_SIZE < 8192)
@@ -1867,22 +1867,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
         /* flip page offset to other buffer */
         rx_buffer->page_offset ^= truesize;
-
-        /* Even if we own the page, we are not allowed to use atomic_set()
-         * This would break get_page_unless_zero() users.
-         */
-        atomic_inc(&page->_count);
 #else
         /* move offset up to the next cache line */
         rx_buffer->page_offset += truesize;
 
         if (rx_buffer->page_offset > last_offset)
                 return false;
-
-        /* bump ref count on page before it is given to the stack */
-        get_page(page);
 #endif
 
+        /* Even if we own the page, we are not allowed to use atomic_set()
+         * This would break get_page_unless_zero() users.
+         */
+        atomic_inc(&page->_count);
+
         return true;
 }
 
@@ -1945,6 +1942,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
                                               rx_buffer->page_offset,
                                               ixgbe_rx_bufsz(rx_ring),
                                               DMA_FROM_DEVICE);
+
+                rx_buffer->skb = NULL;
         }
 
         /* pull page into skb */
@@ -1962,8 +1961,6 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
         }
 
         /* clear contents of buffer_info */
-        rx_buffer->skb = NULL;
-        rx_buffer->dma = 0;
         rx_buffer->page = NULL;
 
         return skb;
@@ -4344,29 +4341,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
         /* Free all the Rx ring sk_buffs */
         for (i = 0; i < rx_ring->count; i++) {
-                struct ixgbe_rx_buffer *rx_buffer;
+                struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-                rx_buffer = &rx_ring->rx_buffer_info[i];
                 if (rx_buffer->skb) {
                         struct sk_buff *skb = rx_buffer->skb;
-                        if (IXGBE_CB(skb)->page_released) {
+                        if (IXGBE_CB(skb)->page_released)
                                 dma_unmap_page(dev,
                                                IXGBE_CB(skb)->dma,
                                                ixgbe_rx_bufsz(rx_ring),
                                                DMA_FROM_DEVICE);
-                                IXGBE_CB(skb)->page_released = false;
-                        }
                         dev_kfree_skb(skb);
                         rx_buffer->skb = NULL;
                 }
-                if (rx_buffer->dma)
-                        dma_unmap_page(dev, rx_buffer->dma,
-                                       ixgbe_rx_pg_size(rx_ring),
-                                       DMA_FROM_DEVICE);
-                rx_buffer->dma = 0;
-                if (rx_buffer->page)
-                        __free_pages(rx_buffer->page,
-                                     ixgbe_rx_pg_order(rx_ring));
+
+                if (!rx_buffer->page)
+                        continue;
+
+                dma_unmap_page(dev, rx_buffer->dma,
+                               ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+                __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
                 rx_buffer->page = NULL;
         }
 
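A note on the recycling policy the new ixgbe_page_is_reserved() helper encodes: a receive page is only reused when it is local to the current NUMA node and was not handed out from the pfmemalloc emergency reserves. The stand-alone sketch below mirrors that check outside the kernel so it can be compiled and run on its own; the mock_page struct, mock_page_is_reserved() and the local_nid parameter are hypothetical stand-ins introduced here for illustration, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page: only the two facts the check needs. */
struct mock_page {
        int nid;          /* NUMA node the page was allocated from */
        bool pfmemalloc;  /* true if it came from emergency reserves */
};

/* Same policy as the driver helper: a page is "reserved" (not recyclable)
 * if it is remote to the current node or came from pfmemalloc reserves. */
static bool mock_page_is_reserved(const struct mock_page *page, int local_nid)
{
        return (page->nid != local_nid) || page->pfmemalloc;
}

int main(void)
{
        struct mock_page local   = { .nid = 0, .pfmemalloc = false };
        struct mock_page remote  = { .nid = 1, .pfmemalloc = false };
        struct mock_page reserve = { .nid = 0, .pfmemalloc = true  };

        printf("local page:   recycle=%d\n", !mock_page_is_reserved(&local, 0));
        printf("remote page:  recycle=%d\n", !mock_page_is_reserved(&remote, 0));
        printf("reserve page: recycle=%d\n", !mock_page_is_reserved(&reserve, 0));
        return 0;
}

Running the sketch prints recycle=1 only for the local, non-reserve page, which matches the decision the ixgbe_add_rx_frag() hunks switch to above.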