diff options
| author | Laurent Vivier <lvivier@redhat.com> | 2026-03-18 10:19:41 +0100 |
|---|---|---|
| committer | Stefano Brivio <sbrivio@redhat.com> | 2026-03-20 20:05:34 +0100 |
| commit | 451fb7600b8dfdf6800abee613f2b4f026fbd150 (patch) | |
| tree | 85d520fc7ef158b10d1a8f96f4c3193645de2dd5 | |
| parent | f5391ae1b72680718e90b5ff6d83feec13f55366 (diff) | |
| download | passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar.gz passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar.bz2 passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar.lz passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar.xz passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.tar.zst passt-451fb7600b8dfdf6800abee613f2b4f026fbd150.zip | |
vu_common: Move iovec management into vu_collect()
Previously, callers had to pre-initialize virtqueue elements with iovec
entries using vu_set_element() or vu_init_elem() before calling
vu_collect(). This meant each element owned a fixed, pre-assigned iovec
slot.
Move the iovec array into vu_collect() as explicit parameters (in_sg,
max_in_sg, and in_total), letting it pass the remaining iovec capacity
directly to vu_queue_pop(). A running current_iov counter tracks
consumed entries across elements, so multiple elements share a single
iovec pool. The optional in_total output parameter reports how many iovec
entries were consumed, allowing callers to track usage across multiple
vu_collect() calls.
This removes vu_set_element() and vu_init_elem(), which are no longer
needed, and is a prerequisite for multi-buffer support, where a single
virtqueue element can use more than one iovec entry. For now, callers
assert the current single-iovec-per-element invariant until they are
updated to handle multiple iovecs.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
| -rw-r--r-- | tcp_vu.c | 25 | ||||
| -rw-r--r-- | udp_vu.c | 21 | ||||
| -rw-r--r-- | vu_common.c | 68 | ||||
| -rw-r--r-- | vu_common.h | 22 |
4 files changed, 60 insertions, 76 deletions
@@ -87,13 +87,13 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags) hdrlen = tcp_vu_hdrlen(CONN_V6(conn)); - vu_set_element(&flags_elem[0], NULL, &flags_iov[0]); - elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1, + &flags_iov[0], 1, NULL, MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN), NULL); if (elem_cnt != 1) return -1; + ASSERT(flags_elem[0].in_num == 1); ASSERT(flags_elem[0].in_sg[0].iov_len >= MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN)); @@ -148,9 +148,8 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags) nb_ack = 1; if (flags & DUP_ACK) { - vu_set_element(&flags_elem[1], NULL, &flags_iov[1]); - elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1, + &flags_iov[1], 1, NULL, flags_elem[0].in_sg[0].iov_len, NULL); if (elem_cnt == 1 && flags_elem[1].in_sg[0].iov_len >= @@ -191,8 +190,8 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, const struct vu_dev *vdev = c->vdev; struct msghdr mh_sock = { 0 }; uint16_t mss = MSS_GET(conn); + size_t hdrlen, iov_used; int s = conn->sock; - size_t hdrlen; int elem_cnt; ssize_t ret; int i; @@ -201,22 +200,26 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, hdrlen = tcp_vu_hdrlen(v6); - vu_init_elem(elem, &iov_vu[DISCARD_IOV_NUM], VIRTQUEUE_MAX_SIZE); - + iov_used = 0; elem_cnt = 0; *head_cnt = 0; - while (fillsize > 0 && elem_cnt < VIRTQUEUE_MAX_SIZE) { + while (fillsize > 0 && elem_cnt < ARRAY_SIZE(elem) && + iov_used < VIRTQUEUE_MAX_SIZE) { + size_t frame_size, dlen, in_total; struct iovec *iov; - size_t frame_size, dlen; int cnt; cnt = vu_collect(vdev, vq, &elem[elem_cnt], - VIRTQUEUE_MAX_SIZE - elem_cnt, + ARRAY_SIZE(elem) - elem_cnt, + &iov_vu[DISCARD_IOV_NUM + iov_used], + VIRTQUEUE_MAX_SIZE - iov_used, &in_total, MAX(MIN(mss, fillsize) + hdrlen, ETH_ZLEN + VNET_HLEN), &frame_size); if (cnt == 0) break; + ASSERT((size_t)cnt == in_total); /* one iovec per element */ + iov_used += in_total; dlen = 
frame_size - hdrlen; /* reserve space for headers in iov */ @@ -247,7 +250,7 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, ret -= already_sent; /* adjust iov number and length of the last iov */ - i = iov_truncate(&iov_vu[DISCARD_IOV_NUM], elem_cnt, ret); + i = iov_truncate(&iov_vu[DISCARD_IOV_NUM], iov_used, ret); /* adjust head count */ while (*head_cnt > 0 && head[*head_cnt - 1] >= i) @@ -71,9 +71,10 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s, bool v6, ssize_t *dlen) { const struct vu_dev *vdev = c->vdev; + int elem_cnt, elem_used, iov_used; struct msghdr msg = { 0 }; - int iov_cnt, iov_used; size_t hdrlen, l2len; + size_t iov_cnt; ASSERT(!c->no_udp); @@ -89,13 +90,14 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s, /* compute L2 header length */ hdrlen = udp_vu_hdrlen(v6); - vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE); - - iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, - IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL); - if (iov_cnt == 0) + elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), + iov_vu, ARRAY_SIZE(iov_vu), &iov_cnt, + IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL); + if (elem_cnt == 0) return -1; + ASSERT((size_t)elem_cnt == iov_cnt); /* one iovec per element */ + /* reserve space for the headers */ ASSERT(iov_vu[0].iov_len >= MAX(hdrlen, ETH_ZLEN + VNET_HLEN)); iov_vu[0].iov_base = (char *)iov_vu[0].iov_base + hdrlen; @@ -107,7 +109,7 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s, *dlen = recvmsg(s, &msg, 0); if (*dlen < 0) { - vu_queue_rewind(vq, iov_cnt); + vu_queue_rewind(vq, elem_cnt); return -1; } @@ -116,15 +118,16 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s, iov_vu[0].iov_len += hdrlen; iov_used = iov_truncate(iov_vu, iov_cnt, *dlen + hdrlen); + elem_used = iov_used; /* one iovec per element */ /* pad frame to 60 bytes: first buffer is at least ETH_ZLEN long */ l2len = 
*dlen + hdrlen - VNET_HLEN; vu_pad(&iov_vu[0], l2len); - vu_set_vnethdr(iov_vu[0].iov_base, iov_used); + vu_set_vnethdr(iov_vu[0].iov_base, elem_used); /* release unused buffers */ - vu_queue_rewind(vq, iov_cnt - iov_used); + vu_queue_rewind(vq, elem_cnt - elem_used); return iov_used; } diff --git a/vu_common.c b/vu_common.c index 834a54f..b6e1fa9 100644 --- a/vu_common.c +++ b/vu_common.c @@ -52,27 +52,14 @@ int vu_packet_check_range(struct vdev_memory *memory, } /** - * vu_init_elem() - initialize an array of virtqueue elements with 1 iov in each - * @elem: Array of virtqueue elements to initialize - * @iov: Array of iovec to assign to virtqueue element - * @elem_cnt: Number of virtqueue element - */ -void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt) -{ - int i; - - for (i = 0; i < elem_cnt; i++) - vu_set_element(&elem[i], NULL, &iov[i]); -} - -/** * vu_collect() - collect virtio buffers from a given virtqueue * @vdev: vhost-user device * @vq: virtqueue to collect from - * @elem: Array of virtqueue element - * each element must be initialized with one iovec entry - * in the in_sg array. 
+ * @elem: Array of @max_elem virtqueue elements * @max_elem: Number of virtqueue elements in the array + * @in_sg: Incoming iovec array for device-writable descriptors + * @max_in_sg: Maximum number of entries in @in_sg + * @in_total: Number of collected entries from @in_sg (output) * @size: Maximum size of the data in the frame * @collected: Collected buffer length, up to @size, set on return * @@ -80,20 +67,21 @@ void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt */ int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq, struct vu_virtq_element *elem, int max_elem, + struct iovec *in_sg, size_t max_in_sg, size_t *in_total, size_t size, size_t *collected) { size_t current_size = 0; + size_t current_iov = 0; int elem_cnt = 0; - while (current_size < size && elem_cnt < max_elem) { - struct iovec *iov; + while (current_size < size && elem_cnt < max_elem && + current_iov < max_in_sg) { int ret; ret = vu_queue_pop(vdev, vq, &elem[elem_cnt], - elem[elem_cnt].in_sg, - elem[elem_cnt].in_num, - elem[elem_cnt].out_sg, - elem[elem_cnt].out_num); + &in_sg[current_iov], + max_in_sg - current_iov, + NULL, 0); if (ret < 0) break; @@ -103,18 +91,22 @@ int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq, break; } - iov = &elem[elem_cnt].in_sg[0]; - - if (iov->iov_len > size - current_size) - iov->iov_len = size - current_size; + elem[elem_cnt].in_num = iov_truncate(elem[elem_cnt].in_sg, + elem[elem_cnt].in_num, + size - current_size); - current_size += iov->iov_len; + current_size += iov_size(elem[elem_cnt].in_sg, + elem[elem_cnt].in_num); + current_iov += elem[elem_cnt].in_num; elem_cnt++; if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) break; } + if (in_total) + *in_total = current_iov; + if (collected) *collected = current_size; @@ -147,8 +139,11 @@ void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq, { int i; - for (i = 0; i < elem_cnt; i++) - vu_queue_fill(vdev, vq, &elem[i], elem[i].in_sg[0].iov_len, i); + for (i = 
0; i < elem_cnt; i++) { + size_t elem_size = iov_size(elem[i].in_sg, elem[i].in_num); + + vu_queue_fill(vdev, vq, &elem[i], elem_size, i); + } vu_queue_flush(vdev, vq, elem_cnt); vu_queue_notify(vdev, vq); @@ -245,7 +240,7 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size) struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE]; struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE]; struct iovec in_sg[VIRTQUEUE_MAX_SIZE]; - size_t total; + size_t total, in_total; int elem_cnt; int i; @@ -256,11 +251,10 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size) return -1; } - vu_init_elem(elem, in_sg, VIRTQUEUE_MAX_SIZE); - size += VNET_HLEN; - elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, size, &total); - if (total < size) { + elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), in_sg, + ARRAY_SIZE(in_sg), &in_total, size, &total); + if (elem_cnt == 0 || total < size) { debug("vu_send_single: no space to send the data " "elem_cnt %d size %zd", elem_cnt, total); goto err; @@ -271,10 +265,10 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size) total -= VNET_HLEN; /* copy data from the buffer to the iovec */ - iov_from_buf(in_sg, elem_cnt, VNET_HLEN, buf, total); + iov_from_buf(in_sg, in_total, VNET_HLEN, buf, total); if (*c->pcap) - pcap_iov(in_sg, elem_cnt, VNET_HLEN); + pcap_iov(in_sg, in_total, VNET_HLEN); vu_flush(vdev, vq, elem, elem_cnt); diff --git a/vu_common.h b/vu_common.h index 865d977..7b060eb 100644 --- a/vu_common.h +++ b/vu_common.h @@ -35,26 +35,10 @@ static inline void *vu_payloadv6(void *base) return (struct ipv6hdr *)vu_ip(base) + 1; } -/** - * vu_set_element() - Initialize a vu_virtq_element - * @elem: Element to initialize - * @out_sg: One out iovec entry to set in elem - * @in_sg: One in iovec entry to set in elem - */ -static inline void vu_set_element(struct vu_virtq_element *elem, - struct iovec *out_sg, struct iovec *in_sg) -{ - elem->out_num = !!out_sg; - elem->out_sg = out_sg; 
- elem->in_num = !!in_sg; - elem->in_sg = in_sg; -} - -void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, - int elem_cnt); int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq, - struct vu_virtq_element *elem, int max_elem, size_t size, - size_t *collected); + struct vu_virtq_element *elem, int max_elem, + struct iovec *in_sg, size_t max_in_sg, size_t *in_total, + size_t size, size_t *collected); void vu_set_vnethdr(struct virtio_net_hdr_mrg_rxbuf *vnethdr, int num_buffers); void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq, struct vu_virtq_element *elem, int elem_cnt); |
