@@ -1822,9 +1822,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 				  lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+				       struct napi_struct *napi,
 				       struct xgbe_ring_data *rdata,
-				       unsigned int *len)
+				       unsigned int len)
 {
 	struct sk_buff *skb;
 	u8 *packet;
@@ -1834,14 +1835,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
 	if (!skb)
 		return NULL;
 
+	/* Start with the header buffer which may contain just the header
+	 * or the header plus data
+	 */
+	dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+				rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
 	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
 	skb_copy_to_linear_data(skb, packet, copy_len);
 	skb_put(skb, copy_len);
 
-	*len -= copy_len;
+	len -= copy_len;
+	if (len) {
+		/* Add the remaining data as a frag */
+		dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+					rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				rdata->rx.buf.pa.pages,
+				rdata->rx.buf.pa.pages_offset,
+				len, rdata->rx.buf.dma_len);
+		rdata->rx.buf.pa.pages = NULL;
+	}
 
 	return skb;
 }
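The hunk above folds the header-buffer DMA sync and the trailing-fragment attach into xgbe_create_skb() itself, so the caller only passes the per-descriptor length by value. A minimal standalone sketch of the same header-split receive pattern, with hypothetical parameters standing in for the driver's rdata fields (napi_alloc_skb, dma_sync_single_for_cpu, skb_copy_to_linear_data and skb_add_rx_frag are real kernel APIs; the function and its arguments are illustrative, not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hedged sketch: copy the (small) header into the skb linear area,
 * then attach any remaining payload zero-copy as a page fragment. */
static struct sk_buff *hdr_split_skb(struct device *dev,
				     struct napi_struct *napi,
				     dma_addr_t hdr_dma, void *hdr_va,
				     unsigned int hdr_buf_len,
				     unsigned int hdr_len,
				     dma_addr_t buf_dma,
				     struct page *buf_page,
				     unsigned int buf_off,
				     unsigned int buf_len,
				     unsigned int len)
{
	struct sk_buff *skb;
	unsigned int copy_len;

	skb = napi_alloc_skb(napi, hdr_buf_len);
	if (!skb)
		return NULL;

	/* The header buffer may hold just the header or header plus data */
	dma_sync_single_for_cpu(dev, hdr_dma, hdr_buf_len, DMA_FROM_DEVICE);
	copy_len = hdr_len ? hdr_len : len;
	copy_len = min(hdr_buf_len, copy_len);
	skb_copy_to_linear_data(skb, hdr_va, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Remaining payload becomes a frag; the full buffer
		 * length is passed as the truesize accounted to the
		 * socket. */
		dma_sync_single_for_cpu(dev, buf_dma, buf_len,
					DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_page, buf_off, len, buf_len);
	}

	return skb;
}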
@@ -1923,7 +1941,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
 	unsigned int incomplete, error, context_next, context;
-	unsigned int len, put_len, max_len;
+	unsigned int len, rdesc_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1933,6 +1951,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (!ring)
 		return 0;
 
+	incomplete = 0;
+	context_next = 0;
+
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
 	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1942,15 +1963,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
 	/* First time in loop see if we need to restore state */
 	if (!received && rdata->state_saved) {
-		incomplete = rdata->state.incomplete;
-		context_next = rdata->state.context_next;
 		skb = rdata->state.skb;
 		error = rdata->state.error;
 		len = rdata->state.len;
 	} else {
 		memset(packet, 0, sizeof(*packet));
-		incomplete = 0;
-		context_next = 0;
 		skb = NULL;
 		error = 0;
 		len = 0;
@@ -1991,23 +2008,16 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 		}
 
 		if (!context) {
-			put_len = rdata->rx.len - len;
-			len += put_len;
-
-			if (!skb) {
-				dma_sync_single_for_cpu(pdata->dev,
-							rdata->rx.hdr.dma,
-							rdata->rx.hdr.dma_len,
-							DMA_FROM_DEVICE);
-
-				skb = xgbe_create_skb(napi, rdata, &put_len);
-				if (!skb) {
+			/* Length is cumulative, get this descriptor's length */
+			rdesc_len = rdata->rx.len - len;
+			len += rdesc_len;
+
+			if (rdesc_len && !skb) {
+				skb = xgbe_create_skb(pdata, napi, rdata,
+						      rdesc_len);
+				if (!skb)
 					error = 1;
-					goto skip_data;
-				}
-			}
-
-			if (put_len) {
+			} else if (rdesc_len) {
 				dma_sync_single_for_cpu(pdata->dev,
 							rdata->rx.buf.dma,
 							rdata->rx.buf.dma_len,
@@ -2016,12 +2026,12 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						put_len, rdata->rx.buf.dma_len);
+						rdesc_len,
+						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-skip_data:
 		if (incomplete || context_next)
 			goto read_again;
 
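Since the hardware reports a running total in rdata->rx.len, the per-descriptor byte count falls out by subtraction. A hedged walk-through with made-up numbers for a packet spanning two descriptors:

/* Illustrative numbers only: a 5000-byte packet received into
 * 4096-byte data buffers arrives across two descriptors.
 *
 *   desc 0: rdata->rx.len = 4096, len = 0
 *           rdesc_len = 4096 - 0 = 4096, xgbe_create_skb(), len = 4096
 *   desc 1: rdata->rx.len = 5000 (cumulative), len = 4096
 *           rdesc_len = 5000 - 4096 = 904, skb_add_rx_frag(), len = 5000
 *
 * A context-only descriptor contributes rdesc_len = 0, which both
 * branches skip, and a failed skb allocation leaves skb NULL so the
 * frag branch is never entered; that is why the skip_data error hop
 * could be dropped.
 */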
@@ -2084,8 +2094,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 	if (received && (incomplete || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
-		rdata->state.incomplete = incomplete;
-		rdata->state.context_next = context_next;
 		rdata->state.skb = skb;
 		rdata->state.len = len;
 		rdata->state.error = error;
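With the restore path no longer reading them and the save path no longer writing them, the incomplete and context_next members of the saved state presumably become dead. Assuming a companion xgbe.h change (not shown in this diff), the per-ring saved state would shrink to something like:

/* Hypothetical post-patch layout, assuming the matching header change:
 * only the per-packet accumulation has to survive a budget-exhausted
 * exit from xgbe_rx_poll(); incomplete and context_next are recomputed
 * from the descriptors on the next poll. */
struct {
	struct sk_buff *skb;
	unsigned int len;
	unsigned int error;
} state;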