diff options
author:    Laurent Vivier <lvivier@redhat.com>  2025-09-05 17:49:33 +0200
committer: Stefano Brivio <sbrivio@redhat.com>  2025-09-09 21:13:59 +0200
commit:    62399155319479f86b07d259b284f6a2991aaba7 (patch)
tree:      685948151c4e513c1da29f7c0633f47ddb311efd
parent:    f9ee749cbb9bb1b962b37d6fd4655683b9953c13 (diff)
download:  passt-62399155319479f86b07d259b284f6a2991aaba7.tar{.gz,.bz2,.lz,.xz,.zst} / .zip
vhost-user: Fix VHOST_USER_GET_QUEUE_NUM to return number of queues
The vhost-user specification states that VHOST_USER_GET_QUEUE_NUM should
return the maximum number of queues supported by the back-end, not the
number of virtqueues. Since each queue pair consists of RX and TX
virtqueues, we need to divide VHOST_USER_MAX_QUEUES by 2 to get the
correct queue count.
Also rename VHOST_USER_MAX_QUEUES to VHOST_USER_MAX_VQS throughout the
codebase to better reflect that it represents the maximum number of
virtqueues, not queue pairs.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
 vhost_user.c | 16
 virtio.h     |  4
 2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/vhost_user.c b/vhost_user.c
index f97ec60..fa343a8 100644
--- a/vhost_user.c
+++ b/vhost_user.c
@@ -345,7 +345,7 @@ static void vu_set_enable_all_rings(struct vu_dev *vdev, bool enable)
 {
 	uint16_t i;
 
-	for (i = 0; i < VHOST_USER_MAX_QUEUES; i++)
+	for (i = 0; i < VHOST_USER_MAX_VQS; i++)
 		vdev->vq[i].enable = enable;
 }
 
@@ -477,7 +477,7 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
 			close(vmsg->fds[i]);
 	}
 
-	for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+	for (i = 0; i < VHOST_USER_MAX_VQS; i++) {
 		if (vdev->vq[i].vring.desc) {
 			if (map_ring(vdev, &vdev->vq[i]))
 				die("remapping queue %d during setmemtable", i);
@@ -770,7 +770,7 @@ static void vu_check_queue_msg_file(struct vhost_user_msg *vmsg)
 	bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
 	int idx = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 
-	if (idx >= VHOST_USER_MAX_QUEUES)
+	if (idx >= VHOST_USER_MAX_VQS)
 		die("Invalid vhost-user queue index: %u", idx);
 
 	if (nofd) {
@@ -939,7 +939,9 @@ static bool vu_get_queue_num_exec(struct vu_dev *vdev,
 {
 	(void)vdev;
 
-	vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_QUEUES);
+	vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_VQS / 2);
+
+	debug("VHOST_USER_MAX_VQS %u", VHOST_USER_MAX_VQS / 2);
 
 	return true;
 }
@@ -960,7 +962,7 @@ static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
 	debug("State.index: %u", idx);
 	debug("State.enable: %u", enable);
 
-	if (idx >= VHOST_USER_MAX_QUEUES)
+	if (idx >= VHOST_USER_MAX_VQS)
 		die("Invalid vring_enable index: %u", idx);
 
 	vdev->vq[idx].enable = enable;
@@ -1052,7 +1054,7 @@ void vu_init(struct ctx *c)
 	c->vdev = &vdev_storage;
 	c->vdev->context = c;
 
-	for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+	for (i = 0; i < VHOST_USER_MAX_VQS; i++) {
 		c->vdev->vq[i] = (struct vu_virtq){
 			.call_fd = -1,
 			.kick_fd = -1,
@@ -1075,7 +1077,7 @@ void vu_cleanup(struct vu_dev *vdev)
 {
 	unsigned int i;
 
-	for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+	for (i = 0; i < VHOST_USER_MAX_VQS; i++) {
 		struct vu_virtq *vq = &vdev->vq[i];
 
 		vq->started = false;
diff --git a/virtio.h b/virtio.h
--- a/virtio.h
+++ b/virtio.h
@@ -88,7 +88,7 @@ struct vu_dev_region {
 	uint64_t mmap_addr;
 };
 
-#define VHOST_USER_MAX_QUEUES	2
+#define VHOST_USER_MAX_VQS	2
 
 /*
  * Set a reasonable maximum number of ram slots, which will be supported by
@@ -121,7 +121,7 @@ struct vdev_memory {
 struct vu_dev {
 	struct ctx *context;
 	struct vdev_memory memory;
-	struct vu_virtq vq[VHOST_USER_MAX_QUEUES];
+	struct vu_virtq vq[VHOST_USER_MAX_VQS];
 	uint64_t features;
 	uint64_t protocol_features;
 	int log_call_fd;