author     David Gibson <david@gibson.dropbear.id.au>  2023-11-06 18:08:28 +1100
committer  Stefano Brivio <sbrivio@redhat.com>          2023-11-07 09:56:10 +0100
commit     e516809a74ffd495481a7adf6b565181861a41f9 (patch)
tree       1b99cba3173477677b3b0e6ac203db673fca0cd2 /test/perf/pasta_tcp
parent     f9ff6678d4bbf5d9c80c1c6f784c3955468c09d6 (diff)
test/perf: Start iperf3 server less often
Currently we start both the iperf3 server(s) and client(s) afresh each time
we want to make a bandwidth measurement. That's not really necessary, as
usually a whole batch of bandwidth measurements can use the same server.

Split up the iperf3 directive into 3 directives: iperf3s to start the
server, iperf3 to make a measurement and iperf3k to kill the server, so
that we can start the server less often. This - and more importantly, the
reduced number of waits for the server to be ready - reduces the runtime of
the performance tests on my laptop by about 4 minutes (out of ~28 minutes).

For now we still restart the server between IPv4 and IPv6 tests. That's
because in some cases the latency measurements we make in between use the
same ports.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
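For illustration only (a sketch distilled from the hunks below, not text
from the commit itself), the directive usage changes roughly as follows:

    # old pattern: each measurement implied its own server start and stop
    iperf3 BW ns host ::1 100${i}3 __THREADS__ __TIME__ __OPTS__

    # new pattern: start the server once, run several measurements, kill it
    iperf3s host 100${i}3 __THREADS__
    iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
    iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
    iperf3k host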
Diffstat (limited to 'test/perf/pasta_tcp')
-rw-r--r--  test/perf/pasta_tcp  77
1 file changed, 52 insertions, 25 deletions
diff --git a/test/perf/pasta_tcp b/test/perf/pasta_tcp
index 9e9dc37..a8938c3 100644
--- a/test/perf/pasta_tcp
+++ b/test/perf/pasta_tcp
@@ -37,21 +37,24 @@ report pasta lo_tcp __THREADS__ __FREQ__
th MTU 1500B 4000B 16384B 65535B
-
tr TCP throughput over IPv6: ns to host
+iperf3s host 100${i}3 __THREADS__
+
ns ip link set dev lo mtu 1500
-iperf3 BW ns host ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 4000
-iperf3 BW ns host ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 16384
-iperf3 BW ns host ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 65535
-iperf3 BW ns host ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns ::1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
+iperf3k host
+
tl TCP RR latency over IPv6: ns to host
lat -
lat -
@@ -72,19 +75,23 @@ lat __LAT__ 500 350
tr TCP throughput over IPv4: ns to host
+iperf3s host 100${i}3 __THREADS__
+
ns ip link set dev lo mtu 1500
-iperf3 BW ns host 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 4000
-iperf3 BW ns host 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 16384
-iperf3 BW ns host 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
ns ip link set dev lo mtu 65535
-iperf3 BW ns host 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
+iperf3 BW ns 127.0.0.1 100${i}3 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
+iperf3k host
+
tl TCP RR latency over IPv4: ns to host
lat -
lat -
@@ -103,14 +110,17 @@ nsout LAT tcp_crr --nolog -P 10003 -C 10013 -4 -c -H 127.0.0.1 | sed -n 's/^thro
hostw
lat __LAT__ 500 350
-
tr TCP throughput over IPv6: host to ns
+iperf3s ns 100${i}2 __THREADS__
+
bw -
bw -
bw -
-iperf3 BW host ns ::1 100${i}2 __THREADS__ __TIME__ __OPTS__
+iperf3 BW host ::1 100${i}2 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
+iperf3k ns
+
tl TCP RR latency over IPv6: host to ns
lat -
lat -
@@ -131,12 +141,16 @@ lat __LAT__ 1000 700
tr TCP throughput over IPv4: host to ns
+iperf3s ns 100${i}2 __THREADS__
+
bw -
bw -
bw -
-iperf3 BW host ns 127.0.0.1 100${i}2 __THREADS__ __TIME__ __OPTS__
+iperf3 BW host 127.0.0.1 100${i}2 __THREADS__ __TIME__ __OPTS__
bw __BW__ 15.0 20.0
+iperf3k ns
+
tl TCP RR latency over IPv4: host to ns
lat -
lat -
@@ -158,7 +172,6 @@ lat __LAT__ 1000 700
te
-
test pasta: throughput and latency (connections via tap)
nsout GW ip -j -4 route show|jq -rM '.[] | select(.dst == "default").gateway'
@@ -173,21 +186,24 @@ report pasta tap_tcp __THREADS__ __FREQ__
th MTU 1500B 4000B 16384B 65520B
-
tr TCP throughput over IPv6: ns to host
+iperf3s host 100${i}3 __THREADS__
+
ns ip link set dev __IFNAME__ mtu 1500
-iperf3 BW ns host __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 512k
+iperf3 BW ns __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 512k
bw __BW__ 0.2 0.4
ns ip link set dev __IFNAME__ mtu 4000
-iperf3 BW ns host __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 1M
+iperf3 BW ns __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 1M
bw __BW__ 0.3 0.5
ns ip link set dev __IFNAME__ mtu 16384
-iperf3 BW ns host __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
+iperf3 BW ns __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 1.5 2.0
ns ip link set dev __IFNAME__ mtu 65520
-iperf3 BW ns host __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
+iperf3 BW ns __GW6__%__IFNAME__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 2.0 2.5
+iperf3k host
+
tl TCP RR latency over IPv6: ns to host
lat -
lat -
@@ -208,19 +224,23 @@ lat __LAT__ 1500 500
tr TCP throughput over IPv4: ns to host
+iperf3s host 100${i}3 __THREADS__
+
ns ip link set dev __IFNAME__ mtu 1500
-iperf3 BW ns host __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 512k
+iperf3 BW ns __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 512k
bw __BW__ 0.2 0.4
ns ip link set dev __IFNAME__ mtu 4000
-iperf3 BW ns host __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 1M
+iperf3 BW ns __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 1M
bw __BW__ 0.3 0.5
ns ip link set dev __IFNAME__ mtu 16384
-iperf3 BW ns host __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
+iperf3 BW ns __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 1.5 2.0
ns ip link set dev __IFNAME__ mtu 65520
-iperf3 BW ns host __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
+iperf3 BW ns __GW__ 100${i}3 __THREADS__ __TIME__ __OPTS__ -w 8M
bw __BW__ 2.0 2.5
+iperf3k host
+
tl TCP RR latency over IPv4: ns to host
lat -
lat -
@@ -239,16 +259,19 @@ nsout LAT tcp_crr --nolog -P 10003 -C 10013 -4 -c -H __GW__ | sed -n 's/^through
hostw
lat __LAT__ 1500 500
-
tr TCP throughput over IPv6: host to ns
+iperf3s ns 100${i}2 __THREADS__
+
nsout IFNAME ip -j link show | jq -rM '.[] | select(.link_type == "ether").ifname'
nsout ADDR6 ip -j -6 addr show|jq -rM '.[] | select(.ifname == "__IFNAME__").addr_info[] | select(.scope == "global" and .prefixlen == 64).local'
bw -
bw -
bw -
-iperf3 BW host ns __ADDR6__ 100${i}2 __THREADS__ __TIME__ __OPTS__
+iperf3 BW host __ADDR6__ 100${i}2 __THREADS__ __TIME__ __OPTS__
bw __BW__ 8.0 10.0
+iperf3k ns
+
tl TCP RR latency over IPv6: host to ns
lat -
lat -
@@ -270,13 +293,17 @@ lat __LAT__ 5000 10000
tr TCP throughput over IPv4: host to ns
+iperf3s ns 100${i}2 __THREADS__
+
nsout ADDR ip -j -4 addr show|jq -rM '.[] | select(.ifname == "__IFNAME__").addr_info[0].local'
bw -
bw -
bw -
-iperf3 BW host ns __ADDR__ 100${i}2 __THREADS__ __TIME__ __OPTS__
+iperf3 BW host __ADDR__ 100${i}2 __THREADS__ __TIME__ __OPTS__
bw __BW__ 8.0 10.0
+iperf3k ns
+
tl TCP RR latency over IPv4: host to ns
lat -
lat -