
Commit 0d20bdf

jasowang authored and davem330 committed
vhost_net: split out datacopy logic
Instead of mixing zerocopy and datacopy logic, this patch splits the datacopy logic out. This results in more compact code, and ad-hoc optimizations can be done on top more easily.

Signed-off-by: Jason Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent c92a8a8 commit 0d20bdf
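
For orientation before reading the diff, here is a minimal, self-contained user-space sketch of the pattern the patch introduces: handle_tx() becomes a thin dispatcher that picks the datacopy or zerocopy path once per invocation instead of branching on every packet. All names below (toy_net, toy_sock, zerocopy_capable) are illustrative stand-ins, not the kernel's vhost API.

/*
 * Minimal sketch of the split-and-dispatch pattern; illustrative
 * stand-in types only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
        bool zerocopy_capable;  /* stand-in for vhost_sock_zcopy(sock) */
};

struct toy_net {
        struct toy_sock *sock;  /* stand-in for vq->private_data */
};

/* Datacopy-only loop: carries no zerocopy bookkeeping at all. */
static void handle_tx_copy(struct toy_net *net, struct toy_sock *sock)
{
        (void)net;
        (void)sock;
        printf("tx: datacopy path\n");
}

/* Zerocopy loop: the only place that tracks in-flight DMA buffers. */
static void handle_tx_zerocopy(struct toy_net *net, struct toy_sock *sock)
{
        (void)net;
        (void)sock;
        printf("tx: zerocopy path\n");
}

/* Thin dispatcher, mirroring the reshaped handle_tx() in the diff. */
static void handle_tx(struct toy_net *net)
{
        struct toy_sock *sock = net->sock;

        if (!sock)
                return;

        if (sock->zerocopy_capable)
                handle_tx_zerocopy(net, sock);
        else
                handle_tx_copy(net, sock);
}

int main(void)
{
        struct toy_sock sock = { .zerocopy_capable = false };
        struct toy_net net = { .sock = &sock };

        handle_tx(&net);        /* prints "tx: datacopy path" */
        return 0;
}

The payoff is the one the commit message describes: each loop keeps only the state it actually needs, so the copy path can later be optimized without touching the zerocopy bookkeeping.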

File tree

1 file changed: +90 −20 lines

drivers/vhost/net.c

Lines changed: 90 additions & 20 deletions
@@ -520,9 +520,7 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
 	       !vhost_vq_avail_empty(vq->dev, vq);
 }
 
-/* Expects to be always run from workqueue - which acts as
- * read-size critical section for our kind of RCU. */
-static void handle_tx(struct vhost_net *net)
+static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
 {
 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
 	struct vhost_virtqueue *vq = &nvq->vq;
@@ -537,30 +535,76 @@ static void handle_tx(struct vhost_net *net)
 	};
 	size_t len, total_len = 0;
 	int err;
-	struct socket *sock;
-	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
-	bool zcopy, zcopy_used;
 	int sent_pkts = 0;
 
-	mutex_lock(&vq->mutex);
-	sock = vq->private_data;
-	if (!sock)
-		goto out;
+	for (;;) {
+		bool busyloop_intr = false;
 
-	if (!vq_iotlb_prefetch(vq))
-		goto out;
+		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
+				   &busyloop_intr);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
+		/* Nothing new?  Wait for eventfd to tell us they refilled. */
+		if (head == vq->num) {
+			if (unlikely(busyloop_intr)) {
+				vhost_poll_queue(&vq->poll);
+			} else if (unlikely(vhost_enable_notify(&net->dev,
+								vq))) {
+				vhost_disable_notify(&net->dev, vq);
+				continue;
+			}
+			break;
+		}
 
-	vhost_disable_notify(&net->dev, vq);
-	vhost_net_disable_vq(net, vq);
+		total_len += len;
+		if (tx_can_batch(vq, total_len))
+			msg.msg_flags |= MSG_MORE;
+		else
+			msg.msg_flags &= ~MSG_MORE;
+
+		/* TODO: Check specific error and bomb out unless ENOBUFS? */
+		err = sock->ops->sendmsg(sock, &msg, len);
+		if (unlikely(err < 0)) {
+			vhost_discard_vq_desc(vq, 1);
+			vhost_net_enable_vq(net, vq);
+			break;
+		}
+		if (err != len)
+			pr_debug("Truncated TX packet: len %d != %zd\n",
+				 err, len);
+		vhost_add_used_and_signal(&net->dev, vq, head, 0);
+		if (vhost_exceeds_weight(++sent_pkts, total_len)) {
+			vhost_poll_queue(&vq->poll);
+			break;
+		}
+	}
+}
 
-	zcopy = nvq->ubufs;
+static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+	unsigned out, in;
+	int head;
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+		.msg_flags = MSG_DONTWAIT,
+	};
+	size_t len, total_len = 0;
+	int err;
+	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
+	bool zcopy_used;
+	int sent_pkts = 0;
 
 	for (;;) {
 		bool busyloop_intr;
 
 		/* Release DMAs done buffers first */
-		if (zcopy)
-			vhost_zerocopy_signal_used(net, vq);
+		vhost_zerocopy_signal_used(net, vq);
 
 		busyloop_intr = false;
 		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -579,9 +623,9 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 
-		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
-			     && !vhost_exceeds_maxpend(net)
-			     && vhost_net_tx_select_zcopy(net);
+		zcopy_used = len >= VHOST_GOODCOPY_LEN
+			     && !vhost_exceeds_maxpend(net)
+			     && vhost_net_tx_select_zcopy(net);
 
 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
 		if (zcopy_used) {
@@ -636,6 +680,32 @@ static void handle_tx(struct vhost_net *net)
 			break;
 		}
 	}
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-size critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct socket *sock;
+
+	mutex_lock(&vq->mutex);
+	sock = vq->private_data;
+	if (!sock)
+		goto out;
+
+	if (!vq_iotlb_prefetch(vq))
+		goto out;
+
+	vhost_disable_notify(&net->dev, vq);
+	vhost_net_disable_vq(net, vq);
+
+	if (vhost_sock_zcopy(sock))
+		handle_tx_zerocopy(net, sock);
+	else
+		handle_tx_copy(net, sock);
+
 out:
 	mutex_unlock(&vq->mutex);
 }
