author | David Gibson <david@gibson.dropbear.id.au> | 2024-01-16 11:50:41 +1100
committer | Stefano Brivio <sbrivio@redhat.com> | 2024-01-22 23:35:31 +0100
commit | 4a849e95267c30e63dbe61c4576c059c927b99d9 (patch)
tree | 5d73a19f7ee2bb10633deabcf7956bddcc10cdf4 /tcp.c
parent | fb7c00169d49a99ea2d14dae05bf0c8190f0971c (diff)
flow: Abstract allocation of new flows with helper function
Currently tcp.c open-codes the process of allocating a new flow from the
flow table: twice, in fact, once for guest-to-host and once for host-to-guest
connections. This duplication isn't ideal and will get worse as we
add more protocols to the flow table. It also makes it harder to
experiment with different ways of handling flow table allocation.
Instead, introduce a function to allocate a new flow: flow_alloc(). In
some cases we currently check whether we can allocate, but delay the
actual allocation. We now handle that slightly differently, with a
flow_alloc_cancel() function to back out a recent allocation. We keep that
separate from a flow_free() function, because future changes we have in
mind will need to handle this case a little differently.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
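The two helpers named above live in flow.c, which falls outside this tcp.c-only diffstat. As a rough, hypothetical sketch of the allocation scheme the message describes, one could imagine something along these lines, assuming the pre-existing flowtab[] array, flow_count counter and FLOW_MAX limit that the removed lines in tcp.c used directly (the ASSERT() sanity check is likewise an assumption, not taken from this patch):

```c
/* Sketch only: the real definitions are in flow.c, not part of this diff. */
union flow *flow_alloc(void)
{
	if (flow_count >= FLOW_MAX)	/* table full: caller must bail out */
		return NULL;

	return &flowtab[flow_count++];	/* hand out the next free slot */
}

void flow_alloc_cancel(union flow *flow)
{
	/* Back out only the most recent allocation; not a general free */
	ASSERT(flow == &flowtab[flow_count - 1]);
	flow_count--;
}
```

Call sites then follow the pattern visible in the diff below: allocate first, attempt the socket setup, and on any failure undo the slot with flow_alloc_cancel() rather than a general-purpose flow_free().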
Diffstat (limited to 'tcp.c')
-rw-r--r-- | tcp.c | 29
1 file changed, 18 insertions, 11 deletions
@@ -1944,17 +1944,18 @@ static void tcp_conn_from_tap(struct ctx *c,
 	};
 	const struct sockaddr *sa;
 	struct tcp_tap_conn *conn;
+	union flow *flow;
 	socklen_t sl;
 	int s, mss;

 	(void)saddr;

-	if (flow_count >= FLOW_MAX)
+	if (!(flow = flow_alloc()))
 		return;

 	if ((s = tcp_conn_pool_sock(pool)) < 0)
 		if ((s = tcp_conn_new_sock(c, af)) < 0)
-			return;
+			goto cancel;

 	if (!c->no_map_gw) {
 		if (af == AF_INET && IN4_ARE_ADDR_EQUAL(daddr, &c->ip4.gw))
@@ -1969,13 +1970,11 @@ static void tcp_conn_from_tap(struct ctx *c,
 			.sin6_addr = c->ip6.addr_ll,
 			.sin6_scope_id = c->ifi6,
 		};
-		if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll))) {
-			close(s);
-			return;
-		}
+		if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll)))
+			goto cancel;
 	}

-	conn = CONN(flow_count++);
+	conn = &flow->tcp;
 	conn->f.type = FLOW_TCP;
 	conn->sock = s;
 	conn->timer = -1;
@@ -2047,6 +2046,12 @@ static void tcp_conn_from_tap(struct ctx *c,
 	}

 	tcp_epoll_ctl(c, conn);
+	return;
+
+cancel:
+	if (s >= 0)
+		close(s);
+	flow_alloc_cancel(flow);
 }

 /**
@@ -2724,14 +2729,12 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,
 	union flow *flow;
 	int s;

-	if (c->no_tcp || flow_count >= FLOW_MAX)
+	if (c->no_tcp || !(flow = flow_alloc()))
 		return;

 	s = accept4(ref.fd, (struct sockaddr *)&sa, &sl, SOCK_NONBLOCK);
 	if (s < 0)
-		return;
-
-	flow = flowtab + flow_count++;
+		goto cancel;

 	if (c->mode == MODE_PASTA &&
 	    tcp_splice_conn_from_sock(c, ref.tcp_listen, &flow->tcp_splice,
@@ -2740,6 +2743,10 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,

 	tcp_tap_conn_from_sock(c, ref.tcp_listen, &flow->tcp, s,
 			       (struct sockaddr *)&sa, now);
+	return;
+
+cancel:
+	flow_alloc_cancel(flow);
 }

 /**