outlet/routing: fix lookup benchmark

The lookup benchmark was incorrect. When looking up a large number of
prefixes on each loop iteration, b.Loop() calibrates on a larger and
less precise value than if it were measuring a single lookup, in which
case it would run more iterations and get a more precise timing.
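
As an illustration, here is the difference between the two shapes of
loop (a rough sketch with placeholder names: table and prefixes stand
in for the real RIB and prefix slice):

    // Coarse: each b.Loop() iteration walks every prefix, so the
    // calibration unit is large and noisy.
    for b.Loop() {
        for _, p := range prefixes {
            table.Lookup(p.Addr())
        }
    }

    // Fine: one lookup per iteration; b.Loop() can run many more
    // iterations and converge on a precise per-lookup timing.
    for b.Loop() {
        table.Lookup(prefixes[b.N%len(prefixes)].Addr())
    }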

The problem may also exist for the insertion benchmark, but it is hard
to do only one insertion per loop iteration: after many iterations,
there is nothing left to insert. I suppose this is why BART's author
does not try to benchmark insertions this way.
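
One common workaround is to rebuild the table and insert the whole
batch on every iteration, then derive a per-route figure, like the
removed ns/route metric did (again a sketch; newTable, Insert and
routes are placeholders):

    for b.Loop() {
        table := newTable()
        for _, r := range routes {
            table.Insert(r.Prefix, r)
        }
    }
    // One iteration covers len(routes) insertions, hence the derived metric.
    b.ReportMetric(float64(b.Elapsed())/float64(b.N)/float64(len(routes)), "ns/route")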

See https://github.com/akvorado/akvorado/pull/2040 and
https://github.com/gaissmai/bart/issues/351#issuecomment-3428806758.
Vincent Bernat
2025-10-21 22:09:34 +02:00
parent 405e922f4d
commit 1cc33d4cc3
2 changed files with 14 additions and 27 deletions


@@ -187,9 +187,10 @@ func BenchmarkNetworks(b *testing.B) {
     <-c.networksCSVReady
     for b.Loop() {
-        if err := c.networksCSVRefresh(); err != nil {
-            b.Fatalf("networksCSVRefresh() error:\n%+v", err)
-        }
+        err = c.networksCSVRefresh()
     }
+    if err != nil {
+        b.Fatalf("networksCSVRefresh() error:\n%+v", err)
+    }
     b.ReportMetric(0, "ns/op")
     b.ReportMetric(float64(b.Elapsed())/float64(b.N)/1_000_000, "ms/op")


@@ -204,18 +204,6 @@ func TestRandomRealWorldRoutes4Distribution(t *testing.T) {
     }
 }
 
-func BenchmarkRandomRealWorldRoutes4(b *testing.B) {
-    prng1 := rand.New(rand.NewSource(1))
-    prng2 := rand.New(rand.NewSource(2))
-    for b.Loop() {
-        for route := range randomRealWorldRoutes4(prng1, prng2, 1000) {
-            _ = route
-        }
-    }
-    b.ReportMetric(0, "ns/op")
-    b.ReportMetric(float64(b.Elapsed())/float64(b.N)/1000, "ns/route")
-}
-
 func BenchmarkRIBInsertion(b *testing.B) {
     for _, routes := range []int{1_000, 10_000, 100_000} {
         for _, peers := range []int{1, 2, 5} {
@@ -283,7 +271,13 @@ func BenchmarkRIBInsertion(b *testing.B) {
 }
 
 func BenchmarkRIBLookup(b *testing.B) {
-    for _, routes := range []int{1_000, 10_000, 100_000} {
+    for _, routes := range []int{1_000, 10_000, 100_000, 1_000_000} {
+        prng1 := rand.New(rand.NewSource(10))
+        prng2 := rand.New(rand.NewSource(0))
+        randomPrefixes := []randomRoute{}
+        for r := range randomRealWorldRoutes4(prng1, prng2, routes/10) {
+            randomPrefixes = append(randomPrefixes, r)
+        }
         for _, peers := range []int{1, 2, 5} {
             name := fmt.Sprintf("%d routes, %d peers", routes, peers)
@@ -317,19 +311,11 @@
                     }
                 }
-                prng1 = rand.New(rand.NewSource(10))
-                lookups := 0
-                randomPrefixes := []randomRoute{}
-                for r := range randomRealWorldRoutes4(prng1, prng2[0], routes/10) {
-                    randomPrefixes = append(randomPrefixes, r)
-                }
                 for b.Loop() {
-                    for _, r := range randomPrefixes {
-                        _, _ = rib.tree.Lookup(netip.AddrFrom16(r.Prefix.Addr().As16()))
-                        lookups++
-                    }
+                    ip4 := randomPrefixes[b.N%len(randomPrefixes)].Prefix.Addr()
+                    _, _ = rib.tree.Lookup(ip4)
                 }
-                b.ReportMetric(float64(b.Elapsed())/float64(lookups), "ns/op")
+                b.ReportMetric(float64(b.Elapsed())/float64(b.N), "ns/op")
             })
         }
     }