Skip to content

Commit 4afb52c

Browse files
jasowang authored and davem330 committed
vhost_net: batch update used ring for datacopy TX
Like commit e2b3b35 ("vhost_net: batch used ring update in rx"), this patch implements batch used ring update for datacopy TX (zerocopy has already done some kind of batching). Testpmd transmission from guest to host (XDP_DROP on tap) shows 25.8% improvement (from ~3.1Mpps to ~3.9Mpps) on Broadwell i7-5600U CPU @ 2.60GHz machine. Netperf TCP tests do not show obvious differences. Signed-off-by: Jason Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent d0d8697 commit 4afb52c

File tree

1 file changed

+25
-15
lines changed

1 file changed

+25
-15
lines changed

drivers/vhost/net.c

Lines changed: 25 additions & 15 deletions
Original file line number · Diff line number · Diff line change
@@ -428,16 +428,31 @@ static int vhost_net_enable_vq(struct vhost_net *n,
428428
return vhost_poll_start(poll, sock->file);
429429
}
430430

431+
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
432+
{
433+
struct vhost_virtqueue *vq = &nvq->vq;
434+
struct vhost_dev *dev = vq->dev;
435+
436+
if (!nvq->done_idx)
437+
return;
438+
439+
vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
440+
nvq->done_idx = 0;
441+
}
442+
431443
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
432-
struct vhost_virtqueue *vq,
444+
struct vhost_net_virtqueue *nvq,
433445
unsigned int *out_num, unsigned int *in_num,
434446
bool *busyloop_intr)
435447
{
448+
struct vhost_virtqueue *vq = &nvq->vq;
436449
unsigned long uninitialized_var(endtime);
437450
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
438451
out_num, in_num, NULL, NULL);
439452

440453
if (r == vq->num && vq->busyloop_timeout) {
454+
if (!vhost_sock_zcopy(vq->private_data))
455+
vhost_net_signal_used(nvq);
441456
preempt_disable();
442457
endtime = busy_clock() + vq->busyloop_timeout;
443458
while (vhost_can_busy_poll(endtime)) {
@@ -493,7 +508,8 @@ static int get_tx_bufs(struct vhost_net *net,
493508
struct vhost_virtqueue *vq = &nvq->vq;
494509
int ret;
495510

496-
ret = vhost_net_tx_get_vq_desc(net, vq, out, in, busyloop_intr);
511+
ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
512+
497513
if (ret < 0 || ret == vq->num)
498514
return ret;
499515

@@ -557,6 +573,9 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
557573
break;
558574
}
559575

576+
vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
577+
vq->heads[nvq->done_idx].len = 0;
578+
560579
total_len += len;
561580
if (tx_can_batch(vq, total_len))
562581
msg.msg_flags |= MSG_MORE;
@@ -573,12 +592,15 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
573592
if (err != len)
574593
pr_debug("Truncated TX packet: len %d != %zd\n",
575594
err, len);
576-
vhost_add_used_and_signal(&net->dev, vq, head, 0);
595+
if (++nvq->done_idx >= VHOST_NET_BATCH)
596+
vhost_net_signal_used(nvq);
577597
if (vhost_exceeds_weight(++sent_pkts, total_len)) {
578598
vhost_poll_queue(&vq->poll);
579599
break;
580600
}
581601
}
602+
603+
vhost_net_signal_used(nvq);
582604
}
583605

584606
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
@@ -741,18 +763,6 @@ static int sk_has_rx_data(struct sock *sk)
741763
return skb_queue_empty(&sk->sk_receive_queue);
742764
}
743765

744-
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
745-
{
746-
struct vhost_virtqueue *vq = &nvq->vq;
747-
struct vhost_dev *dev = vq->dev;
748-
749-
if (!nvq->done_idx)
750-
return;
751-
752-
vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
753-
nvq->done_idx = 0;
754-
}
755-
756766
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
757767
bool *busyloop_intr)
758768
{

0 commit comments

Comments (0)