about | git | code | bugs | lists | chat
path: root/tcp.c
diff options
context:
space:
mode:
authorDavid Gibson <david@gibson.dropbear.id.au>2023-12-07 16:53:53 +1100
committerStefano Brivio <sbrivio@redhat.com>2023-12-27 19:29:45 +0100
commit5d5bb8c1501e6ee02568d086e36081a756e45e51 (patch)
tree0ad3763402db0e9d05d34c3b767d1bd726ee9b70 /tcp.c
parent64e5459ba644930ef99ea56dd3df92101bd364ff (diff)
downloadpasst-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar.gz
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar.bz2
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar.lz
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar.xz
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.tar.zst
passt-5d5bb8c1501e6ee02568d086e36081a756e45e51.zip
tcp: Don't account for hash table size in tcp_hash()
Currently tcp_hash() returns the hash bucket for a value, that is the hash modulo the size of the hash table. Usually it's a bit more flexible to have hash functions return a "raw" hash value and perform the modulus in the callers. That allows the same hash function to be used for multiple tables of different sizes, or to re-use the hash for other purposes. We don't do anything like that with tcp_hash() at present, but we have some plans to do so. Prepare for that by making tcp_hash() and tcp_conn_hash() return raw hash values. Signed-off-by: David Gibson <david@gibson.dropbear.id.au> Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Diffstat (limited to 'tcp.c')
-rw-r--r--tcp.c23
1 file changed, 10 insertions, 13 deletions
diff --git a/tcp.c b/tcp.c
index cdaf2c2..53a699e 100644
--- a/tcp.c
+++ b/tcp.c
@@ -1160,18 +1160,15 @@ static int tcp_hash_match(const struct tcp_tap_conn *conn,
* @eport: Guest side endpoint port
* @fport: Guest side forwarding port
*
- * Return: hash value, already modulo size of the hash table
+ * Return: hash value, needs to be adjusted for table size
*/
-static unsigned int tcp_hash(const struct ctx *c, const union inany_addr *faddr,
- in_port_t eport, in_port_t fport)
+static uint64_t tcp_hash(const struct ctx *c, const union inany_addr *faddr,
+ in_port_t eport, in_port_t fport)
{
struct siphash_state state = SIPHASH_INIT(c->hash_secret);
- uint64_t hash;
inany_siphash_feed(&state, faddr);
- hash = siphash_final(&state, 20, (uint64_t)eport << 16 | fport);
-
- return (unsigned int)(hash % TCP_HASH_TABLE_SIZE);
+ return siphash_final(&state, 20, (uint64_t)eport << 16 | fport);
}
/**
@@ -1179,10 +1176,10 @@ static unsigned int tcp_hash(const struct ctx *c, const union inany_addr *faddr,
* @c: Execution context
* @conn: Connection
*
- * Return: hash value, already modulo size of the hash table
+ * Return: hash value, needs to be adjusted for table size
*/
-static unsigned int tcp_conn_hash(const struct ctx *c,
- const struct tcp_tap_conn *conn)
+static uint64_t tcp_conn_hash(const struct ctx *c,
+ const struct tcp_tap_conn *conn)
{
return tcp_hash(c, &conn->faddr, conn->eport, conn->fport);
}
@@ -1199,7 +1196,7 @@ static inline unsigned tcp_hash_probe(const struct ctx *c,
const struct tcp_tap_conn *conn)
{
flow_sidx_t sidx = FLOW_SIDX(conn, TAPSIDE);
- unsigned b = tcp_conn_hash(c, conn);
+ unsigned b = tcp_conn_hash(c, conn) % TCP_HASH_TABLE_SIZE;
/* Linear probing */
while (!flow_sidx_eq(tc_hash[b], FLOW_SIDX_NONE) &&
@@ -1242,7 +1239,7 @@ static void tcp_hash_remove(const struct ctx *c,
for (s = mod_sub(b, 1, TCP_HASH_TABLE_SIZE);
(flow = flow_at_sidx(tc_hash[s]));
s = mod_sub(s, 1, TCP_HASH_TABLE_SIZE)) {
- unsigned h = tcp_conn_hash(c, &flow->tcp);
+ unsigned h = tcp_conn_hash(c, &flow->tcp) % TCP_HASH_TABLE_SIZE;
if (!mod_between(h, s, b, TCP_HASH_TABLE_SIZE)) {
/* tc_hash[s] can live in tc_hash[b]'s slot */
@@ -1298,7 +1295,7 @@ static struct tcp_tap_conn *tcp_hash_lookup(const struct ctx *c,
inany_from_af(&aany, af, faddr);
- b = tcp_hash(c, &aany, eport, fport);
+ b = tcp_hash(c, &aany, eport, fport) % TCP_HASH_TABLE_SIZE;
while ((flow = flow_at_sidx(tc_hash[b])) &&
!tcp_hash_match(&flow->tcp, &aany, eport, fport))
b = mod_sub(b, 1, TCP_HASH_TABLE_SIZE);