@@ -741,7 +741,7 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
-static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_virtqueue *vq = &nvq->vq;
 	struct vhost_dev *dev = vq->dev;
@@ -765,7 +765,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 
 	if (!len && tvq->busyloop_timeout) {
 		/* Flush batched heads first */
-		vhost_rx_signal_used(rnvq);
+		vhost_net_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&tvq->mutex, 1);
 		vhost_disable_notify(&net->dev, tvq);
@@ -1008,7 +1008,7 @@ static void handle_rx(struct vhost_net *net)
 		}
 		nvq->done_idx += headcount;
 		if (nvq->done_idx > VHOST_RX_BATCH)
-			vhost_rx_signal_used(nvq);
+			vhost_net_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -1022,7 +1022,7 @@ static void handle_rx(struct vhost_net *net)
 	else
 		vhost_net_enable_vq(net, vq);
 out:
-	vhost_rx_signal_used(nvq);
+	vhost_net_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
 
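For context, the renamed helper in full, as suggested by the context lines above. This is a sketch reconstructed from the hunks shown here, not part of the diff; the vhost_add_used_and_signal_n() call and the done_idx reset are assumptions based on how the batching around nvq->done_idx is used in handle_rx().

/* Reconstruction (assumed body, not part of the diff): flush the heads
 * batched in nvq->done_idx to the used ring and signal the guest. */
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	/* Nothing batched yet; avoid a spurious signal. */
	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

The rename reflects that the helper is not RX-specific: it flushes whatever heads a vhost_net virtqueue has batched, which is why vhost_net_rx_peek_head_len() can call it before busy-polling and handle_rx() can call it both when the batch exceeds VHOST_RX_BATCH and on exit.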