@@ -520,9 +520,7 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
 	       !vhost_vq_avail_empty(vq->dev, vq);
 }
 
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 {
 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
 	struct vhost_virtqueue *vq = &nvq->vq;
@@ -537,30 +535,76 @@ static void handle_tx(struct vhost_net *net)
 	};
 	size_t len, total_len = 0;
 	int err;
-	struct socket *sock;
-	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
-	bool zcopy, zcopy_used;
 	int sent_pkts = 0;
 
-	mutex_lock(&vq->mutex);
-	sock = vq->private_data;
-	if (!sock)
-		goto out;
+	for (;;) {
+		bool busyloop_intr = false;
 
-	if (!vq_iotlb_prefetch(vq))
-		goto out;
+		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+				   &busyloop_intr);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
+		/* Nothing new? Wait for eventfd to tell us they refilled. */
+		if (head == vq->num) {
+			if (unlikely(busyloop_intr)) {
+				vhost_poll_queue(&vq->poll);
+			} else if (unlikely(vhost_enable_notify(&net->dev,
+								vq))) {
+				vhost_disable_notify(&net->dev, vq);
+				continue;
+			}
+			break;
+		}
 
-	vhost_disable_notify(&net->dev, vq);
-	vhost_net_disable_vq(net, vq);
+		total_len += len;
+		if (tx_can_batch(vq, total_len))
+			msg.msg_flags |= MSG_MORE;
+		else
+			msg.msg_flags &= ~MSG_MORE;
+
+		/* TODO: Check specific error and bomb out unless ENOBUFS? */
+		err = sock->ops->sendmsg(sock, &msg, len);
+		if (unlikely(err < 0)) {
+			vhost_discard_vq_desc(vq, 1);
+			vhost_net_enable_vq(net, vq);
+			break;
+		}
+		if (err != len)
+			pr_debug("Truncated TX packet: len %d != %zd\n",
+				 err, len);
+		vhost_add_used_and_signal(&net->dev, vq, head, 0);
+		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
+			vhost_poll_queue(&vq->poll);
+			break;
+		}
+	}
+}
 
-	zcopy = nvq->ubufs;
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+	unsigned out, in;
+	int head;
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+		.msg_flags = MSG_DONTWAIT,
+	};
+	size_t len, total_len = 0;
+	int err;
+	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+	bool zcopy_used;
+	int sent_pkts = 0;
 
 	for (;;) {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
-		if (zcopy)
-			vhost_zerocopy_signal_used(net, vq);
+		vhost_zerocopy_signal_used(net, vq);
 
 		busyloop_intr = false;
 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -579,9 +623,9 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 
-		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
-			     && !vhost_exceeds_maxpend(net)
-			     && vhost_net_tx_select_zcopy(net);
+		zcopy_used = len >= VHOST_GOODCOPY_LEN
+			     && !vhost_exceeds_maxpend(net)
+			     && vhost_net_tx_select_zcopy(net);
 
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
 		if (zcopy_used) {
@@ -636,6 +680,32 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 	}
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-size critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct socket *sock;
+
+	mutex_lock(&vq->mutex);
+	sock = vq->private_data;
+	if (!sock)
+		goto out;
+
+	if (!vq_iotlb_prefetch(vq))
+		goto out;
+
+	vhost_disable_notify(&net->dev, vq);
+	vhost_net_disable_vq(net, vq);
+
+	if (vhost_sock_zcopy(sock))
+		handle_tx_zerocopy(net, sock);
+	else
+		handle_tx_copy(net, sock);
+
 out:
 	mutex_unlock(&vq->mutex);
 }
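
For context on the batching decision in handle_tx_copy() above: the loop keeps MSG_MORE set in msg.msg_flags while tx_can_batch() reports that more descriptors are pending, and clears it on the last packet so the socket can flush what it has coalesced. The fragment below is a minimal userspace sketch of the same MSG_MORE semantics over sendmsg(2); it is not part of this patch, the address and port are hypothetical, and a peer must be listening there for connect() to succeed.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
	const char *parts[] = { "hdr:", "payload", "\n" };
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(9000),	/* hypothetical port */
	};
	struct msghdr msg = { 0 };
	struct iovec iov;
	int fd, i;

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		return 1;
	}

	for (i = 0; i < 3; i++) {
		iov.iov_base   = (void *)parts[i];
		iov.iov_len    = strlen(parts[i]);
		msg.msg_iov    = &iov;
		msg.msg_iovlen = 1;
		/* Userspace passes the flag as the sendmsg(2) argument,
		 * whereas the in-kernel path above sets msg.msg_flags
		 * directly.  Keep MSG_MORE while more pieces follow and
		 * drop it on the last piece so the stack may flush. */
		if (sendmsg(fd, &msg, i < 2 ? MSG_MORE : 0) < 0) {
			perror("sendmsg");
			break;
		}
	}
	close(fd);
	return 0;
}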