@@ -1002,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
 					&size, budget);
 
 	if (packets) {
+		netdev_completed_queue(ndev, packets, size);
 		u64_stats_update_begin(&lp->tx_stat_sync);
 		u64_stats_add(&lp->tx_packets, packets);
 		u64_stats_add(&lp->tx_bytes, size);
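The TX completion poll already tallies the reaped descriptors (`packets`) and their total byte count (`size`) for the stats counters, so the completion side of Byte Queue Limits (BQL) accounting can reuse the same totals: `netdev_completed_queue()` tells the stack how much previously queued data has actually left the hardware.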
@@ -1125,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	if (++new_tail_ptr >= lp->tx_bd_num)
 		new_tail_ptr = 0;
 	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+	netdev_sent_queue(ndev, skb->len);
 
 	/* Start the transfer */
 	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
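On the transmit side, `netdev_sent_queue(ndev, skb->len)` records the bytes handed to the DMA engine just before the transfer is kicked off. Every byte reported here must eventually be matched by a `netdev_completed_queue()` call, or wiped by `netdev_reset_queue()`; that pairing is what lets BQL size the in-flight window and keep NIC queueing latency down.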
@@ -1751,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
 		dma_release_channel(lp->tx_chan);
 	}
 
+	netdev_reset_queue(ndev);
 	axienet_iow(lp, XAE_IE_OFFSET, 0);
 
 	if (lp->eth_irq > 0)
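When the interface is stopped, any bytes still accounted as in flight will never be completed, so `netdev_reset_queue()` zeroes the BQL bookkeeping before teardown; without it, a later open could inherit stale byte counts and stall or mis-limit the queue.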
@@ -2676,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
 			 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
 	axienet_dma_stop(lp);
+	netdev_reset_queue(ndev);
 
 	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
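The DMA error handler rebuilds the TX ring from scratch, so it needs the same reset as axienet_stop(). Taken together, the four hunks implement the standard BQL call pattern. Below is a minimal sketch of that pattern for a generic single-queue driver: the netdev_*_queue() calls are the real API from <linux/netdevice.h>, while the "driver-specific" placeholders stand in for hardware access this sketch does not implement.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	unsigned int len = skb->len;

	/* ... map the skb and post it to the hardware TX ring (driver-specific) ... */

	/* BQL: account the bytes handed to hardware before kicking DMA,
	 * mirroring the netdev_sent_queue() placement in axienet_start_xmit().
	 */
	netdev_sent_queue(ndev, len);
	return NETDEV_TX_OK;
}

static int my_tx_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	unsigned int packets = 0, bytes = 0;

	/* ... reap completed descriptors, summing packets/bytes (driver-specific) ... */

	/* BQL: release the completed work so the stack can refill the queue. */
	if (packets)
		netdev_completed_queue(ndev, packets, bytes);
	return packets;
}

static void my_teardown(struct net_device *ndev)
{
	/* BQL: bytes still accounted will never complete once the ring is
	 * destroyed, so reset the bookkeeping, as in axienet_stop() and the
	 * DMA error handler above.
	 */
	netdev_reset_queue(ndev);
}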