@@ -707,7 +707,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 				   struct hv_netvsc_packet *packet,
 				   struct rndis_message *rndis_msg,
 				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+				   bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
@@ -720,7 +720,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 
 	/* Add padding */
 	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -829,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;
@@ -845,7 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))
@@ -896,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if stack says more data is coming
+	 * and not doing mixed modes send and not flow blocked
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -919,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
 
-		if (skb->xmit_more && !packet->cp_partial) {
+		if (xmit_more) {
 			msdp->skb = skb;
 			msdp->pkt = packet;
 			msdp->count++;
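
As context for the change above: the new xmit_more local folds three conditions into one decision. Batching continues only while the stack hints that more data is coming (skb->xmit_more), the send is not a mixed-mode partial copy (packet->cp_partial), and the transmit queue is not flow blocked (netif_xmit_stopped() on the queue returned by netdev_get_tx_queue()). The user-space sketch below only illustrates that control flow; struct packet_ctx, struct tx_queue_ctx, and keep_aggregating() are hypothetical stand-ins, not hv_netvsc or kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the state the driver consults; these are
 * not the real hv_netvsc structures. */
struct packet_ctx {
	bool stack_says_more;	/* models skb->xmit_more */
	bool copy_partial;	/* models packet->cp_partial */
};

struct tx_queue_ctx {
	bool stopped;		/* models netif_xmit_stopped(txq) */
};

/* Keep aggregating only if the stack says more data is coming, the send
 * is not a partial copy, and the queue is not flow blocked -- the same
 * three conditions the patch folds into the xmit_more variable. */
static bool keep_aggregating(const struct packet_ctx *pkt,
			     const struct tx_queue_ctx *txq)
{
	return pkt->stack_says_more && !pkt->copy_partial && !txq->stopped;
}

int main(void)
{
	struct packet_ctx pkt = { .stack_says_more = true, .copy_partial = false };
	struct tx_queue_ctx txq = { .stopped = true };

	/* Queue is flow blocked: flush immediately instead of holding the packet. */
	printf("aggregate? %s\n", keep_aggregating(&pkt, &txq) ? "yes" : "no, flush");
	return 0;
}

Passing the result down as a plain bool (instead of the skb) also removes netvsc_copy_to_send_buf's dependency on struct sk_buff, so the padding path no longer re-derives the condition itself.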