some benchmarks.

old lmdb:

goos: linux
goarch: amd64
pkg: github.com/fiatjaf/eventstore/test
cpu: AMD Ryzen 3 3200G with Radeon Vega Graphics
BenchmarkDBs/lmdb/filter/q-0-4         	    1000	     25128 ns/op
BenchmarkDBs/lmdb/filter/q-1-4         	    1000	      2082 ns/op
BenchmarkDBs/lmdb/filter/q-2-4         	    1000	      3826 ns/op
BenchmarkDBs/lmdb/filter/q-3-4         	    1000	      1017 ns/op
BenchmarkDBs/lmdb/filter/q-4-4         	    1000	      1632 ns/op
BenchmarkDBs/lmdb/filter/q-5-4         	    1000	      2421 ns/op
BenchmarkDBs/lmdb/filter/q-6-4         	    1000	      2091 ns/op
BenchmarkDBs/lmdb/filter/q-7-4         	    1000	      1941 ns/op
BenchmarkDBs/lmdb/filter/q-8-4         	    1000	      1365 ns/op
BenchmarkDBs/lmdb/filter/q-9-4         	    1000	      1173 ns/op
BenchmarkDBs/lmdb/filter/q-10-4        	    1000	      2298 ns/op
BenchmarkDBs/lmdb/filter/q-11-4        	    1000	      1473 ns/op
BenchmarkDBs/lmdb/insert-4             	    1000	     99234 ns/op

new lmdb:

goos: linux
goarch: amd64
pkg: github.com/fiatjaf/eventstore/test
cpu: AMD Ryzen 3 3200G with Radeon Vega Graphics
BenchmarkDBs/lmdb/filter/q-0-4         	    1000	     22811 ns/op
BenchmarkDBs/lmdb/filter/q-1-4         	    1000	      2202 ns/op
BenchmarkDBs/lmdb/filter/q-2-4         	    1000	      3115 ns/op
BenchmarkDBs/lmdb/filter/q-3-4         	    1000	      1285 ns/op
BenchmarkDBs/lmdb/filter/q-4-4         	    1000	      1528 ns/op
BenchmarkDBs/lmdb/filter/q-5-4         	    1000	      2352 ns/op
BenchmarkDBs/lmdb/filter/q-6-4         	    1000	      2824 ns/op
BenchmarkDBs/lmdb/filter/q-7-4         	    1000	      3080 ns/op
BenchmarkDBs/lmdb/filter/q-8-4         	    1000	      1416 ns/op
BenchmarkDBs/lmdb/filter/q-9-4         	    1000	      1383 ns/op
BenchmarkDBs/lmdb/filter/q-10-4        	    1000	      1455 ns/op
BenchmarkDBs/lmdb/filter/q-11-4        	    1000	      1286 ns/op
BenchmarkDBs/lmdb/insert-4             	    1000	     88692 ns/op

So it's not dramatically better, but at least it's not worse, and I think in the real world it will be better.
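(Roughly: insert goes from 99234 to 88692 ns/op, about a 10.6% improvement, and the heaviest filter, q-0, goes from 25128 to 22811 ns/op, about 9%; most of the other per-query differences look like they're within noise.)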

badger:

goos: linux
goarch: amd64
pkg: github.com/fiatjaf/eventstore/test
cpu: AMD Ryzen 3 3200G with Radeon Vega Graphics
BenchmarkDBs/badger/filter/q-0-4         	    1000	      1406 ns/op
BenchmarkDBs/badger/filter/q-1-4         	    1000	      1374 ns/op
BenchmarkDBs/badger/filter/q-2-4         	    1000	      3300 ns/op
BenchmarkDBs/badger/filter/q-3-4         	    1000	       848.3 ns/op
BenchmarkDBs/badger/filter/q-4-4         	    1000	      1447 ns/op
BenchmarkDBs/badger/filter/q-5-4         	    1000	      1170 ns/op
BenchmarkDBs/badger/filter/q-6-4         	    1000	      1247 ns/op
BenchmarkDBs/badger/filter/q-7-4         	    1000	      1280 ns/op
BenchmarkDBs/badger/filter/q-8-4         	    1000	      1091 ns/op
BenchmarkDBs/badger/filter/q-9-4         	    1000	      1225 ns/op
BenchmarkDBs/badger/filter/q-10-4        	    1000	       906.7 ns/op
BenchmarkDBs/badger/filter/q-11-4        	    1000	       892.8 ns/op
BenchmarkDBs/badger/insert-4             	    1000	    153319 ns/op

It looks better than LMDB, but I think it's worse in the real world because it uses too much memory and has all the ristretto cache overhead, which doesn't seem to play very well with the Go GC, who knows.
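The memory point is hard to judge from ns/op alone. A hedged tweak (not something this commit does) would be to report allocations inside the filter sub-benchmarks in the file below, or just run the suite with -benchmem, so cache and GC overhead show up as B/op and allocs/op next to the timings:

	b.Run(fmt.Sprintf("q-%d", q), func(b *testing.B) {
		b.ReportAllocs() // same effect as passing -benchmem, scoped to this sub-benchmark
		for i := 0; i < b.N; i++ {
			_, _ = db.QueryEvents(ctx, filter)
		}
	})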

sqlite:

BenchmarkDBs/sqlite/filter/q-0-4        	      50	   3910252 ns/op
BenchmarkDBs/sqlite/filter/q-1-4        	      50	   3352545 ns/op
BenchmarkDBs/sqlite/filter/q-2-4        	      50	  12234810 ns/op
BenchmarkDBs/sqlite/filter/q-3-4        	      50	    441476 ns/op
BenchmarkDBs/sqlite/filter/q-4-4        	      50	    179682 ns/op
BenchmarkDBs/sqlite/filter/q-5-4        	      50	   3773350 ns/op
BenchmarkDBs/sqlite/filter/q-6-4        	      50	   3614942 ns/op
BenchmarkDBs/sqlite/filter/q-7-4        	      50	   4040850 ns/op
BenchmarkDBs/sqlite/filter/q-8-4        	      50	   4702405 ns/op
BenchmarkDBs/sqlite/filter/q-9-4        	      50	   5153673 ns/op
BenchmarkDBs/sqlite/filter/q-10-4       	      50	   5840877 ns/op
BenchmarkDBs/sqlite/filter/q-11-4       	      50	  33000137 ns/op

(insert doesn't complete)

By the way, these benchmarks are all a bit suspect because they keep halting the computer for some reason. These DBs may not be well-suited to the incessant calling that the Go benchmark framework does, I don't know. Arbitrarily reducing the number of iterations almost made them work, but tweaking that value yields very different results, so all of these numbers should be taken with a heavy grain of salt.
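For reference, the fixed iteration counts above (1000 for lmdb and badger, 50 for sqlite) suggest the runs used an explicit count rather than the default time-based mode, i.e. something along the lines of

	go test -run='^$' -bench=BenchmarkDBs -benchtime=1000x ./test

(the exact invocation isn't recorded in this commit). Leaving -benchtime at its default lets the framework pick b.N on its own, which is exactly where the instability shows up.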
fiatjaf committed Sep 26, 2024
1 parent 1908e0d commit 1370af0

test/benchmark_test.go (+118 −0)

package test

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"os"
	"testing"

	"github.com/fiatjaf/eventstore"
	"github.com/fiatjaf/eventstore/badger"
	"github.com/fiatjaf/eventstore/lmdb"
	"github.com/fiatjaf/eventstore/slicestore"
	"github.com/fiatjaf/eventstore/sqlite3"
	"github.com/nbd-wtf/go-nostr"
)

// BenchmarkDBs runs the same filter and insert workload against each backend.
// dbpath, ctx, sk3 and sk4 are defined in the other test files of this package.
func BenchmarkDBs(b *testing.B) {
	s := &slicestore.SliceStore{}
	s.Init()

	b.Run("slice", func(b *testing.B) {
		runBenchmarkOn(b, s)
	})

	os.RemoveAll(dbpath + "lmdb")
	l := &lmdb.LMDBBackend{Path: dbpath + "lmdb"}
	l.Init()

	b.Run("lmdb", func(b *testing.B) {
		runBenchmarkOn(b, l)
	})

	os.RemoveAll(dbpath + "badger")
	d := &badger.BadgerBackend{Path: dbpath + "badger"}
	d.Init()

	b.Run("badger", func(b *testing.B) {
		runBenchmarkOn(b, d)
	})

	os.RemoveAll(dbpath + "sqlite")
	q := &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryTagsLimit: 50}
	q.Init()

	b.Run("sqlite", func(b *testing.B) {
		runBenchmarkOn(b, q)
	})
}

// runBenchmarkOn seeds the store with 10000 signed events, then measures the
// twelve representative filter queries (the q-0 … q-11 sub-benchmarks) and a tight insert loop.
func runBenchmarkOn(b *testing.B, db eventstore.Store) {
	for i := 0; i < 10000; i++ {
		eTag := make([]byte, 32)
		binary.BigEndian.PutUint16(eTag, uint16(i))

		ref, _ := nostr.GetPublicKey(sk3)
		if i%3 == 0 {
			ref, _ = nostr.GetPublicKey(sk4)
		}

		evt := &nostr.Event{
			CreatedAt: nostr.Timestamp(i*10 + 2),
			Content:   fmt.Sprintf("hello %d", i),
			Tags: nostr.Tags{
				{"t", fmt.Sprintf("t%d", i)},
				{"e", hex.EncodeToString(eTag)},
				{"p", ref},
			},
			Kind: i % 10,
		}
		sk := sk3
		if i%3 == 0 {
			sk = sk4
		}
		evt.Sign(sk)
		db.SaveEvent(ctx, evt)
	}

	filters := make([]nostr.Filter, 0, 10)
	filters = append(filters, nostr.Filter{Kinds: []int{1, 4, 8, 16}})
	pk3, _ := nostr.GetPublicKey(sk3)
	filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}})
	filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}, Kinds: []int{3, 4}})
	filters = append(filters, nostr.Filter{})
	filters = append(filters, nostr.Filter{Limit: 20})
	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3}}})
	pk4, _ := nostr.GetPublicKey(sk4)
	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
	eTags := make([]string, 20)
	for i := 0; i < 20; i++ {
		eTag := make([]byte, 32)
		binary.BigEndian.PutUint16(eTag, uint16(i))
		eTags[i] = hex.EncodeToString(eTag)
	}
	filters = append(filters, nostr.Filter{Kinds: []int{9}, Tags: nostr.TagMap{"e": eTags}})
	filters = append(filters, nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
	filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}})
	filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50})

	b.Run("filter", func(b *testing.B) {
		for q, filter := range filters {
			b.Run(fmt.Sprintf("q-%d", q), func(b *testing.B) {
				for i := 0; i < b.N; i++ {
					_, _ = db.QueryEvents(ctx, filter)
				}
			})
		}
	})

	b.Run("insert", func(b *testing.B) {
		evt := &nostr.Event{Kind: 788, CreatedAt: nostr.Now(), Content: "blergh", Tags: nostr.Tags{{"t", "spam"}}}
		evt.Sign(sk4)
		for i := 0; i < b.N; i++ {
			db.SaveEvent(ctx, evt)
		}
	})
}
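One more caveat (an observation about the benchmark, not something the commit claims): QueryEvents appears to return a channel of events along with an error, and the filter loop above discards it without draining, so backends that stream results lazily may not be doing the full query work inside the measured region. A draining variant would look roughly like this, assuming the (chan *nostr.Event, error) signature:

	for i := 0; i < b.N; i++ {
		ch, err := db.QueryEvents(ctx, filter)
		if err != nil {
			b.Fatal(err)
		}
		for range ch {
			// consume every event so the whole query is inside the measurement
		}
	}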
