author     David Gibson <david@gibson.dropbear.id.au>   2024-07-18 15:26:30 +1000
committer  Stefano Brivio <sbrivio@redhat.com>          2024-07-19 18:32:44 +0200
commit     52d45f1737ef802b09039aa31b5c137593f3cc2e (patch)
tree       fbdac19f01331492f6dc2507307e3be8aa684a59 /tcp_buf.c
parent     f9fe212b1f2bc105939bd2603991dbcd0e6a3b5f (diff)
tcp: Obtain guest address from flowside
Currently we always deliver inbound TCP packets to the guest's most
recently observed IP address. This has the odd side effect that if the
guest changes its IP address while it has active TCP connections, we
might deliver packets from old connections to the new address. That
won't work; it will probably result in an RST from the guest. Worse, if
the guest adds a new address but retains the old one, we could break
those old connections by redirecting them to the new address.
Now that we maintain flowside information, we have a record of the correct
guest-side address and can simply use it.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
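
For illustration, here is a minimal sketch of the idea behind the change: the address that inbound frames are delivered to comes from a per-connection flow-side record rather than a context-wide "most recently seen" address. The names below (struct flowside, struct conn, struct ctx.addr_seen, tap_dst_addr()) are hypothetical stand-ins for this sketch, not the actual passt definitions touched by this patch.

/* Illustrative sketch only: names are hypothetical stand-ins. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* One side of a flow: the peer's ("endpoint") address and ours. */
struct flowside {
	struct in_addr eaddr;	/* endpoint (here: guest) address */
	struct in_addr oaddr;	/* our address as seen by that endpoint */
};

/* Minimal connection record carrying the tap-facing flowside. */
struct conn {
	struct flowside tapside;
};

/* Old approach, for contrast: one "last address observed from the
 * guest", shared by every connection in the context. */
struct ctx {
	struct in_addr addr_seen;
};

/* Destination for inbound frames, taken from the per-flow record
 * instead of the shared context, so a later address change on the
 * guest cannot redirect packets of established connections. */
static struct in_addr tap_dst_addr(const struct conn *conn)
{
	return conn->tapside.eaddr;
}

int main(void)
{
	struct conn conn;
	char buf[INET_ADDRSTRLEN];
	struct in_addr dst;

	inet_pton(AF_INET, "192.0.2.10", &conn.tapside.eaddr);
	inet_pton(AF_INET, "192.0.2.1", &conn.tapside.oaddr);

	dst = tap_dst_addr(&conn);
	printf("deliver to %s\n", inet_ntop(AF_INET, &dst, buf, sizeof(buf)));
	return 0;
}

Keeping the address in per-flow state means an established connection keeps being addressed as it was when it was created, even if the guest later adds or switches to a different address, which is exactly the failure mode described above.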
Diffstat (limited to 'tcp_buf.c')
-rw-r--r--  tcp_buf.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
@@ -316,7 +316,7 @@ int tcp_buf_send_flag(struct ctx *c, struct tcp_tap_conn *conn, int flags)
 		return ret;
 	}
 
-	l4len = tcp_l2_buf_fill_headers(c, conn, iov, optlen, NULL, seq);
+	l4len = tcp_l2_buf_fill_headers(conn, iov, optlen, NULL, seq);
 	iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 
 	if (flags & DUP_ACK) {
@@ -373,7 +373,7 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
 		tcp4_frame_conns[tcp4_payload_used] = conn;
 
 		iov = tcp4_l2_iov[tcp4_payload_used++];
-		l4len = tcp_l2_buf_fill_headers(c, conn, iov, dlen, check, seq);
+		l4len = tcp_l2_buf_fill_headers(conn, iov, dlen, check, seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp4_payload_used > TCP_FRAMES_MEM - 1)
 			tcp_payload_flush(c);
@@ -381,7 +381,7 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
 		tcp6_frame_conns[tcp6_payload_used] = conn;
 
 		iov = tcp6_l2_iov[tcp6_payload_used++];
-		l4len = tcp_l2_buf_fill_headers(c, conn, iov, dlen, NULL, seq);
+		l4len = tcp_l2_buf_fill_headers(conn, iov, dlen, NULL, seq);
 		iov[TCP_IOV_PAYLOAD].iov_len = l4len;
 		if (tcp6_payload_used > TCP_FRAMES_MEM - 1)
 			tcp_payload_flush(c);