@@ -793,7 +793,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	union ring_type get_rx, put_rx, first_rx, last_rx;
+	union ring_type get_rx, put_rx, last_rx;
 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
 	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
 	struct nv_skb_map *rx_skb;
@@ -1812,7 +1812,7 @@ static int nv_alloc_rx(struct net_device *dev)
 	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
-	if (less_rx-- == np->first_rx.orig)
+	if (less_rx-- == np->rx_ring.orig)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
@@ -1833,7 +1833,7 @@ static int nv_alloc_rx(struct net_device *dev)
 			wmb();
 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
-				np->put_rx.orig = np->first_rx.orig;
+				np->put_rx.orig = np->rx_ring.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
@@ -1853,7 +1853,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
-	if (less_rx-- == np->first_rx.ex)
+	if (less_rx-- == np->rx_ring.ex)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
@@ -1875,7 +1875,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			wmb();
 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
-				np->put_rx.ex = np->first_rx.ex;
+				np->put_rx.ex = np->rx_ring.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
@@ -1903,7 +1903,8 @@ static void nv_init_rx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+	np->get_rx = np->rx_ring;
+	np->put_rx = np->rx_ring;
 
 	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
@@ -2911,7 +2912,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
-			np->get_rx.orig = np->first_rx.orig;
+			np->get_rx.orig = np->rx_ring.orig;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 
@@ -3000,7 +3001,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 		}
 next_pkt:
 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
-			np->get_rx.ex = np->first_rx.ex;
+			np->get_rx.ex = np->rx_ring.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 
0 commit comments