In case anyone was wondering, the network is still just as fast as before, if not faster.
I did a few runs and was reliably getting 800+ completed iterations with the standard test data set. The best was this one, with 850 iterations and an average request latency of 890ms!
$ cat ~/dev/anttp/test/performance/src/localhost-autonomi-http.js; k6 run -u 10 -i 1000 ~/dev/anttp/test/performance/src/localhost-autonomi-http.js
import http from 'k6/http';

export default function () {
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_QdxdljdwBwR2QbAVr8scuw.png', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_dH5Ce6neTHIfEkAbmsr1BQ.jpeg', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_pt48p45dQmR5PBW8np1l8Q.png', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_sWZ4OWGeQjWs6urcPwR6Yw.png', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_ZT6qplX5Yt8PMCUqxq1lFQ.png', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_SxkGLnSNsMtu0SDrsWW8Wg.jpeg', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_bogEVpJvgx_gMHQoHMoSLg.jpeg', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_LFEyRQMHmxRnZtJwMozW5w.jpeg', { timeout: '600s' });
}
execution: local
script: /home/paul/dev/anttp/test/performance/src/localhost-autonomi-http.js
output: -
scenarios: (100.00%) 1 scenario, 10 max VUs, 10m30s max duration (incl. graceful stop):
* default: 1000 iterations shared among 10 VUs (maxDuration: 10m0s, gracefulStop: 30s)
data_received..................: 3.0 GB 4.9 MB/s
data_sent......................: 1.2 MB 1.9 kB/s
dropped_iterations.............: 150 0.247673/s
http_req_blocked...............: avg=7.78µs min=1.25µs med=4.34µs max=2.02ms p(90)=11.25µs p(95)=16.28µs
http_req_connecting............: avg=300ns min=0s med=0s max=257.54µs p(90)=0s p(95)=0s
http_req_duration..............: avg=890.49ms min=488.54ms med=829.99ms max=2.69s p(90)=1.19s p(95)=1.32s
{ expected_response:true }...: avg=890.49ms min=488.54ms med=829.99ms max=2.69s p(90)=1.19s p(95)=1.32s
http_req_failed................: 0.00% 0 out of 6800
http_req_receiving.............: avg=523.08ms min=274.37ms med=458.33ms max=1.94s p(90)=791.08ms p(95)=890.04ms
http_req_sending...............: avg=39.73µs min=5.11µs med=14.47µs max=4.33ms p(90)=54.33µs p(95)=114.26µs
http_req_tls_handshaking.......: avg=0s min=0s med=0s max=0s p(90)=0s p(95)=0s
http_req_waiting...............: avg=367.36ms min=156.32ms med=343.25ms max=2.06s p(90)=499.77ms p(95)=558.8ms
http_reqs......................: 6800 11.227844/s
iteration_duration.............: avg=7.12s min=6s med=6.98s max=9.91s p(90)=7.8s p(95)=8.53s
iterations.....................: 850 1.403481/s
vus............................: 10 min=10 max=10
vus_max........................: 10 min=10 max=10
running (10m05.6s), 00/10 VUs, 850 complete and 0 interrupted iterations
default ✗ [===============================>------] 10 VUs 10m05.6s/10m0s 0850/1000 shared iters
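If these numbers hold up across runs, the same expectation could be baked into the script as k6 thresholds, so any regression fails the run outright. Just a rough sketch, not part of the actual test suite: the threshold values are simply lifted from the figures above, and only one of the images from the standard set is shown.

import http from 'k6/http';
import { check } from 'k6';

export const options = {
  vus: 10,
  iterations: 1000,
  thresholds: {
    // the run above had p(95)=1.32s, so allow some headroom
    http_req_duration: ['p(95)<2000'],
    // the run above had 0 failures out of 6800 requests
    http_req_failed: ['rate<0.01'],
  },
};

export default function () {
  const res = http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/1_QdxdljdwBwR2QbAVr8scuw.png', { timeout: '600s' });
  check(res, { 'status is 200': (r) => r.status === 200 });
}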
For the large data set:
$ cat ~/dev/anttp/test/performance/src/localhost-large-autonomi-http.js; k6 run -u 10 -i 1000 ~/dev/anttp/test/performance/src/localhost-large-autonomi-http.js
import http from 'k6/http';

export default function () {
  http.get('http://localhost:8080/91d16e58e9164bccd29a8fd8d25218a61d8253b51c26119791b2633ff4f6b309/to-autonomi.mp4', { timeout: '600s' });
  http.get('http://localhost:8080/cec7a9eb2c644b9a5de58bbcdf2e893db9f0b2acd7fc563fc849e19d1f6bd872/st-patrick-monument.mp4', { timeout: '600s' });
  http.get('http://localhost:8080/b6ec9f0f84cf6236dc42d3624679649f51024a57a58b2805552bb3aa690244dd/newcastle-promenade.mp4', { timeout: '600s' });
}
execution: local
script: /home/paul/dev/anttp/test/performance/src/localhost-large-autonomi-http.js
output: -
scenarios: (100.00%) 1 scenario, 10 max VUs, 10m30s max duration (incl. graceful stop):
* default: 1000 iterations shared among 10 VUs (maxDuration: 10m0s, gracefulStop: 30s)
data_received..................: 29 GB 47 MB/s
data_sent......................: 99 kB 160 B/s
dropped_iterations.............: 800 1.291995/s
http_req_blocked...............: avg=23.32µs min=1.45µs med=12.26µs max=724.4µs p(90)=37.36µs p(95)=47.35µs
http_req_connecting............: avg=4.2µs min=0s med=0s max=288.43µs p(90)=0s p(95)=0s
http_req_duration..............: avg=10.31s min=1.22s med=8.23s max=26.04s p(90)=21.24s p(95)=22.43s
{ expected_response:true }...: avg=10.31s min=1.22s med=8.23s max=26.04s p(90)=21.24s p(95)=22.43s
http_req_failed................: 0.00% 0 out of 600
http_req_receiving.............: avg=9.84s min=802.38ms med=7.79s max=25.52s p(90)=20.74s p(95)=21.95s
http_req_sending...............: avg=442.21µs min=5.65µs med=36.27µs max=96.49ms p(90)=116.77µs p(95)=193.05µs
http_req_tls_handshaking.......: avg=0s min=0s med=0s max=0s p(90)=0s p(95)=0s
http_req_waiting...............: avg=476.77ms min=196.4ms med=440.4ms max=1.07s p(90)=681.93ms p(95)=815.23ms
http_reqs......................: 600 0.968996/s
iteration_duration.............: avg=30.95s min=27.12s med=30.81s max=34.33s p(90)=33.21s p(95)=33.89s
iterations.....................: 200 0.322999/s
vus............................: 10 min=10 max=10
vus_max........................: 10 min=10 max=10
running (10m19.2s), 00/10 VUs, 200 complete and 0 interrupted iterations
default ✗ [======>-------------------------------] 10 VUs 10m19.2s/10m0s 0200/1000 shared iters
Looking at the throughput, we're up on the previous figures by a fair margin: 47 MB/s (roughly 375 Mbit/s)!
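That figure falls straight out of the summary above; a quick back-of-envelope check, assuming the reported totals are accurate:

// Rough throughput check from the k6 summary above.
const bytesReceived = 29e9;         // data_received ≈ 29 GB
const runSeconds = 10 * 60 + 19.2;  // running (10m19.2s)
const mbPerSec = bytesReceived / runSeconds / 1e6;  // ≈ 47 MB/s
const mbitPerSec = mbPerSec * 8;                    // ≈ 375 Mbit/s
console.log(`${mbPerSec.toFixed(1)} MB/s, ${mbitPerSec.toFixed(0)} Mbit/s`);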
I was curious to see what that looked like through my router, and it was in a similar ballpark. I'm assuming the difference is down to the data being compressed on download from Autonomi; there may also be some caching going on in the library, but that's pure guesswork on my part!
Note that about 55 Mbit/s of that is background Autonomi node traffic, so it's roughly 185 Mbit/s for this test's traffic. It's a bit rough, but it gives us some confirmation at least.