author     David Gibson <david@gibson.dropbear.id.au>  2024-01-16 11:50:41 +1100
committer  Stefano Brivio <sbrivio@redhat.com>         2024-01-22 23:35:31 +0100
commit     4a849e95267c30e63dbe61c4576c059c927b99d9 (patch)
tree       5d73a19f7ee2bb10633deabcf7956bddcc10cdf4
parent     fb7c00169d49a99ea2d14dae05bf0c8190f0971c (diff)
flow: Abstract allocation of new flows with helper function
Currently tcp.c open codes the process of allocating a new flow from the flow table: twice, in fact, once for guest to host and once for host to guest connections. This duplication isn't ideal and will get worse as we add more protocols to the flow table. It also makes it harder to experiment with different ways of handling flow table allocation.

Instead, introduce a function to allocate a new flow: flow_alloc(). In some cases we currently check if we're able to allocate, but delay the actual allocation. We now handle that slightly differently with a flow_alloc_cancel() function to back out a recent allocation. We have that separate from a flow_free() function, because future changes we have in mind will need to handle this case a little differently.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
-rw-r--r--  flow.c        | 26
-rw-r--r--  flow_table.h  |  3
-rw-r--r--  tcp.c         | 29
3 files changed, 47 insertions(+), 11 deletions(-)
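As a minimal sketch of the calling pattern this change introduces (not part of the patch itself), a caller reserves a slot up front and backs it out if any later setup step fails; setup_resources() below is a hypothetical stand-in for whatever can still fail after the slot is reserved, such as the socket creation and bind steps in tcp_conn_from_tap():

/* Hypothetical caller of flow_alloc()/flow_alloc_cancel(), mirroring the
 * goto-cancel pattern used in tcp_conn_from_tap() and tcp_listen_handler()
 * in the diff below.  Assumes flow_table.h is included.
 */
static void example_new_flow(void)
{
	union flow *flow;

	if (!(flow = flow_alloc()))		/* table full: nothing to undo */
		return;

	if (setup_resources() < 0) {		/* e.g. socket setup failed */
		/* Must be the most recently allocated flow, i.e. called
		 * before any further flow_alloc()
		 */
		flow_alloc_cancel(flow);
		return;
	}

	/* Flow is committed: fill in flow->f and protocol-specific fields */
}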
diff --git a/flow.c b/flow.c
index 64de75c..63eefd6 100644
--- a/flow.c
+++ b/flow.c
@@ -51,6 +51,32 @@ void flow_log_(const struct flow_common *f, int pri, const char *fmt, ...)
}
/**
+ * flow_alloc() - Allocate a new flow
+ *
+ * Return: pointer to an unused flow entry, or NULL if the table is full
+ */
+union flow *flow_alloc(void)
+{
+ if (flow_count >= FLOW_MAX)
+ return NULL;
+
+ return &flowtab[flow_count++];
+}
+
+/**
+ * flow_alloc_cancel() - Free a newly allocated flow
+ * @flow: Flow to deallocate
+ *
+ * @flow must be the last flow allocated by flow_alloc()
+ */
+void flow_alloc_cancel(union flow *flow)
+{
+ ASSERT(FLOW_IDX(flow) == flow_count - 1);
+ memset(flow, 0, sizeof(*flow));
+ flow_count--;
+}
+
+/**
* flow_table_compact() - Perform compaction on flow table
* @c: Execution context
* @hole: Pointer to recently closed flow
diff --git a/flow_table.h b/flow_table.h
index 4aa2398..2773a2b 100644
--- a/flow_table.h
+++ b/flow_table.h
@@ -88,4 +88,7 @@ static inline flow_sidx_t flow_sidx(const struct flow_common *f,
*/
#define FLOW_SIDX(f_, side) (flow_sidx(&(f_)->f, (side)))
+union flow *flow_alloc(void);
+void flow_alloc_cancel(union flow *flow);
+
#endif /* FLOW_TABLE_H */
diff --git a/tcp.c b/tcp.c
index 6b62896..5b56786 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1944,17 +1944,18 @@ static void tcp_conn_from_tap(struct ctx *c,
};
const struct sockaddr *sa;
struct tcp_tap_conn *conn;
+ union flow *flow;
socklen_t sl;
int s, mss;
(void)saddr;
- if (flow_count >= FLOW_MAX)
+ if (!(flow = flow_alloc()))
return;
if ((s = tcp_conn_pool_sock(pool)) < 0)
if ((s = tcp_conn_new_sock(c, af)) < 0)
- return;
+ goto cancel;
if (!c->no_map_gw) {
if (af == AF_INET && IN4_ARE_ADDR_EQUAL(daddr, &c->ip4.gw))
@@ -1969,13 +1970,11 @@ static void tcp_conn_from_tap(struct ctx *c,
.sin6_addr = c->ip6.addr_ll,
.sin6_scope_id = c->ifi6,
};
- if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll))) {
- close(s);
- return;
- }
+ if (bind(s, (struct sockaddr *)&addr6_ll, sizeof(addr6_ll)))
+ goto cancel;
}
- conn = CONN(flow_count++);
+ conn = &flow->tcp;
conn->f.type = FLOW_TCP;
conn->sock = s;
conn->timer = -1;
@@ -2047,6 +2046,12 @@ static void tcp_conn_from_tap(struct ctx *c,
}
tcp_epoll_ctl(c, conn);
+ return;
+
+cancel:
+ if (s >= 0)
+ close(s);
+ flow_alloc_cancel(flow);
}
/**
@@ -2724,14 +2729,12 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,
union flow *flow;
int s;
- if (c->no_tcp || flow_count >= FLOW_MAX)
+ if (c->no_tcp || !(flow = flow_alloc()))
return;
s = accept4(ref.fd, (struct sockaddr *)&sa, &sl, SOCK_NONBLOCK);
if (s < 0)
- return;
-
- flow = flowtab + flow_count++;
+ goto cancel;
if (c->mode == MODE_PASTA &&
tcp_splice_conn_from_sock(c, ref.tcp_listen, &flow->tcp_splice,
@@ -2740,6 +2743,10 @@ void tcp_listen_handler(struct ctx *c, union epoll_ref ref,
tcp_tap_conn_from_sock(c, ref.tcp_listen, &flow->tcp, s,
(struct sockaddr *)&sa, now);
+ return;
+
+cancel:
+ flow_alloc_cancel(flow);
}
/**