author    David Gibson <david@gibson.dropbear.id.au>  2024-01-16 11:50:42 +1100
committer Stefano Brivio <sbrivio@redhat.com>         2024-01-22 23:35:33 +0100
commit    9c0881d4f6dd651fd2a40896b54d554cb7ba5b2e
tree      cde62804974d309221bbfc181aecd3dcc55f2ddd
parent    4a849e95267c30e63dbe61c4576c059c927b99d9
flow: Enforce that freeing of closed flows must happen in deferred handlers
Currently, flows are only ever finally freed (and the table compacted) from the deferred handlers. Some future ways we want to optimise managing the flow table will rely on this, so enforce it: rather than having the TCP code directly call flow_table_compact(), add a boolean return value to the per-flow deferred handlers. If true, this indicates that the flow code itself should free the flow.

This forces all freeing of flows to occur during the flow code's scan of the table in flow_defer_handler(), which opens possibilities for future optimisations.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
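To make the new contract concrete, here is a minimal, self-contained sketch of the pattern the patch below introduces. This is not passt's actual code: the names (toy_flow, toy_flow_defer, toy_defer_scan, the 128-entry table) are invented for illustration, and the real types live in flow.c, tcp.c and tcp_splice.c. The point it demonstrates is that per-flow handlers only report that a flow is ready to free, and the single scanning loop is the only place a table slot is actually released and compacted.

/* Minimal sketch of the deferred-free contract.  All names here
 * (toy_flow, toy_flow_defer, toy_defer_scan) are hypothetical;
 * passt's real implementation is in flow.c.  Handlers report
 * closure; only the scan frees slots and compacts the table. */
#include <stdbool.h>
#include <stddef.h>

struct toy_flow {
	bool closing;	/* set elsewhere when the flow shuts down */
	int sock;	/* per-flow resource released on close */
};

static struct toy_flow toy_tab[128];
static size_t toy_count;

/* Per-flow deferred handler: clean up, but never free the slot here.
 * Return: true if the flow is ready to free, false otherwise. */
static bool toy_flow_defer(struct toy_flow *flow)
{
	if (!flow->closing)
		return false;
	/* release per-flow resources, e.g. close(flow->sock) */
	return true;
}

/* The only place slots are freed: scan backwards and fill each hole
 * with the last entry, mirroring flow_table_compact()'s role. */
static void toy_defer_scan(void)
{
	size_t i = toy_count;

	while (i-- > 0) {
		if (toy_flow_defer(&toy_tab[i]))
			toy_tab[i] = toy_tab[--toy_count];
	}
}

int main(void)
{
	toy_count = 2;
	toy_tab[0].closing = true;	/* freed by the scan */
	toy_tab[1].closing = false;	/* survives, moved into slot 0 */
	toy_defer_scan();
	return toy_count == 1 ? 0 : 1;	/* exits 0: one flow remains */
}

Scanning from the end of the table matters here, as it does in flow_defer_handler(): compaction fills the hole with the last entry, which a backwards scan has already visited, so no flow is skipped or handled twice.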
 flow.c       | 13
 flow.h       |  1
 tcp.c        |  9
 tcp_conn.h   |  4
 tcp_splice.c |  9
 5 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/flow.c b/flow.c
index 63eefd6..d6650fc 100644
--- a/flow.c
+++ b/flow.c
@@ -81,7 +81,7 @@ void flow_alloc_cancel(union flow *flow)
* @c: Execution context
* @hole: Pointer to recently closed flow
*/
-void flow_table_compact(const struct ctx *c, union flow *hole)
+static void flow_table_compact(const struct ctx *c, union flow *hole)
{
union flow *from;
@@ -131,18 +131,23 @@ void flow_defer_handler(const struct ctx *c, const struct timespec *now)
}
for (flow = flowtab + flow_count - 1; flow >= flowtab; flow--) {
+ bool closed = false;
+
switch (flow->f.type) {
case FLOW_TCP:
- tcp_flow_defer(c, flow);
+ closed = tcp_flow_defer(flow);
break;
case FLOW_TCP_SPLICE:
- tcp_splice_flow_defer(c, flow);
- if (timer)
+ closed = tcp_splice_flow_defer(flow);
+ if (!closed && timer)
tcp_splice_timer(c, flow);
break;
default:
/* Assume other flow types don't need any handling */
;
}
+
+ if (closed)
+ flow_table_compact(c, flow);
}
}
diff --git a/flow.h b/flow.h
index 44058bf..8064f0e 100644
--- a/flow.h
+++ b/flow.h
@@ -68,7 +68,6 @@ static inline bool flow_sidx_eq(flow_sidx_t a, flow_sidx_t b)
union flow;
-void flow_table_compact(const struct ctx *c, union flow *hole);
void flow_defer_handler(const struct ctx *c, const struct timespec *now);
void flow_log_(const struct flow_common *f, int pri, const char *fmt, ...)
diff --git a/tcp.c b/tcp.c
index 5b56786..ee2c3af 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1304,21 +1304,22 @@ static struct tcp_tap_conn *tcp_hash_lookup(const struct ctx *c,
/**
* tcp_flow_defer() - Deferred per-flow handling (clean up closed connections)
- * @c: Execution context
* @flow: Flow table entry for this connection
+ *
+ * Return: true if the flow is ready to free, false otherwise
*/
-void tcp_flow_defer(const struct ctx *c, union flow *flow)
+bool tcp_flow_defer(union flow *flow)
{
const struct tcp_tap_conn *conn = &flow->tcp;
if (flow->tcp.events != CLOSED)
- return;
+ return false;
close(conn->sock);
if (conn->timer != -1)
close(conn->timer);
- flow_table_compact(c, flow);
+ return true;
}
static void tcp_rst_do(struct ctx *c, struct tcp_tap_conn *conn);
diff --git a/tcp_conn.h b/tcp_conn.h
index 825155a..636224e 100644
--- a/tcp_conn.h
+++ b/tcp_conn.h
@@ -158,8 +158,8 @@ extern int init_sock_pool6 [TCP_SOCK_POOL_SIZE];
void tcp_tap_conn_update(const struct ctx *c, struct tcp_tap_conn *old,
struct tcp_tap_conn *new);
void tcp_splice_conn_update(const struct ctx *c, struct tcp_splice_conn *new);
-void tcp_flow_defer(const struct ctx *c, union flow *flow);
-void tcp_splice_flow_defer(const struct ctx *c, union flow *flow);
+bool tcp_flow_defer(union flow *flow);
+bool tcp_splice_flow_defer(union flow *flow);
void tcp_splice_timer(const struct ctx *c, union flow *flow);
int tcp_conn_pool_sock(int pool[]);
int tcp_conn_new_sock(const struct ctx *c, sa_family_t af);
diff --git a/tcp_splice.c b/tcp_splice.c
index 3f6f1b3..daef7de 100644
--- a/tcp_splice.c
+++ b/tcp_splice.c
@@ -244,16 +244,17 @@ void tcp_splice_conn_update(const struct ctx *c, struct tcp_splice_conn *new)
/**
* tcp_splice_flow_defer() - Deferred per-flow handling (clean up closed)
- * @c: Execution context
* @flow: Flow table entry for this connection
+ *
+ * Return: true if the flow is ready to free, false otherwise
*/
-void tcp_splice_flow_defer(const struct ctx *c, union flow *flow)
+bool tcp_splice_flow_defer(union flow *flow)
{
struct tcp_splice_conn *conn = &flow->tcp_splice;
unsigned side;
if (!(flow->tcp_splice.flags & CLOSING))
- return;
+ return false;
for (side = 0; side < SIDES; side++) {
if (conn->events & SPLICE_ESTABLISHED) {
@@ -277,7 +278,7 @@ void tcp_splice_flow_defer(const struct ctx *c, union flow *flow)
conn->flags = 0;
flow_dbg(conn, "CLOSED");
- flow_table_compact(c, flow);
+ return true;
}
/**