diff options
author | David Gibson <david@gibson.dropbear.id.au> | 2024-07-26 17:20:31 +1000 |
---|---|---|
committer | Stefano Brivio <sbrivio@redhat.com> | 2024-07-26 14:07:42 +0200 |
commit | 57a21d2df1467302dee71ee9d5683a8b96e6ce7f (patch) | |
tree | fa0904c3966dce139a0be6422d3cfebc2e583886 /tap.c | |
parent | 37e3b24d90278288b6b6216da249b5d45fc4c411 (diff) | |
download | passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar.gz passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar.bz2 passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar.lz passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar.xz passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.tar.zst passt-57a21d2df1467302dee71ee9d5683a8b96e6ce7f.zip |
tap: Improve handling of partially received frames on qemu socket2024_07_26.57a21d2
Because the Unix socket to qemu is a stream socket, we have no guarantee
of where the boundaries between recv() calls will lie. Typically they
will lie on frame boundaries, because that's how qemu will send them, but
we can't rely on it.
Currently we handle this case by detecting when we have received a partial
frame and performing a blocking recv() to get the remainder, and only then
processing the frames. Change it so instead we save the partial frame
persistently and include it as the first thing processed next time we
receive data from the socket. This handles a number of (unlikely) cases
which previously would not be dealt with correctly:
* If qemu sent a partial frame then waited some time before sending the
remainder, previously we could block here for an unacceptably long time
* If qemu sent a tiny partial frame (< 4 bytes) we'd leave the loop without
doing the partial frame handling, which would put us out of sync with
the stream from qemu
* If the blocking recv() only received some of the remainder of the
frame, not all of it, we'd return leaving us out of sync with the
stream again
Caveat: This could memmove() a moderate amount of data (ETH_MAX_MTU). This
is probably acceptable because it's an unlikely case in practice. If
necessary we could mitigate this by using a true ring buffer.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Diffstat (limited to 'tap.c')
-rw-r--r-- | tap.c | 36 |
1 files changed, 23 insertions, 13 deletions
@@ -989,6 +989,8 @@ static void tap_sock_reset(struct ctx *c) void tap_handler_passt(struct ctx *c, uint32_t events, const struct timespec *now) { + static const char *partial_frame; + static ssize_t partial_len = 0; ssize_t n; char *p; @@ -997,11 +999,18 @@ void tap_handler_passt(struct ctx *c, uint32_t events, return; } - p = pkt_buf; - tap_flush_pools(); - n = recv(c->fd_tap, p, TAP_BUF_FILL, MSG_DONTWAIT); + if (partial_len) { + /* We have a partial frame from an earlier pass. Move it to the + * start of the buffer, top up with new data, then process all + * of it. + */ + memmove(pkt_buf, partial_frame, partial_len); + } + + n = recv(c->fd_tap, pkt_buf + partial_len, TAP_BUF_BYTES - partial_len, + MSG_DONTWAIT); if (n < 0) { if (errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK) { err_perror("Receive error on guest connection, reset"); @@ -1010,7 +1019,10 @@ void tap_handler_passt(struct ctx *c, uint32_t events, return; } - while (n > (ssize_t)sizeof(uint32_t)) { + p = pkt_buf; + n += partial_len; + + while (n >= (ssize_t)sizeof(uint32_t)) { uint32_t l2len = ntohl_unaligned(p); if (l2len < sizeof(struct ethhdr) || l2len > ETH_MAX_MTU) { @@ -1019,24 +1031,22 @@ void tap_handler_passt(struct ctx *c, uint32_t events, return; } + if (l2len + sizeof(uint32_t) > (size_t)n) + /* Leave this incomplete frame for later */ + break; + p += sizeof(uint32_t); n -= sizeof(uint32_t); - /* At most one packet might not fit in a single read, and this - * needs to be blocking. - */ - if (l2len > n) { - ssize_t rem = recv(c->fd_tap, p + n, l2len - n, 0); - if ((n += rem) != l2len) - return; - } - tap_add_packet(c, l2len, p); p += l2len; n -= l2len; } + partial_len = n; + partial_frame = p; + tap_handler(c, now); } |