ethdb: add benchmark test suite (#26659)

rjl493456442 2023-02-10 17:35:00 +08:00 committed by GitHub
parent b0cd8c4a5c
commit 0ea65d4020
4 changed files with 147 additions and 1 deletion


@@ -18,6 +18,7 @@ package dbtest
import (
	"bytes"
	"math/rand"
	"reflect"
	"sort"
	"testing"
@@ -377,6 +378,101 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
	})
}

// BenchDatabaseSuite runs a suite of benchmarks against a KeyValueStore database
// implementation.
func BenchDatabaseSuite(b *testing.B, New func() ethdb.KeyValueStore) {
	var (
		keys, vals   = makeDataset(1_000_000, 32, 32, false)
		sKeys, sVals = makeDataset(1_000_000, 32, 32, true)
	)
	// Run benchmarks sequentially
	b.Run("Write", func(b *testing.B) {
		benchWrite := func(b *testing.B, keys, vals [][]byte) {
			b.ResetTimer()
			b.ReportAllocs()
			db := New()
			defer db.Close()
			for i := 0; i < len(keys); i++ {
				db.Put(keys[i], vals[i])
			}
		}
		b.Run("WriteSorted", func(b *testing.B) {
			benchWrite(b, sKeys, sVals)
		})
		b.Run("WriteRandom", func(b *testing.B) {
			benchWrite(b, keys, vals)
		})
	})
	b.Run("Read", func(b *testing.B) {
		benchRead := func(b *testing.B, keys, vals [][]byte) {
			db := New()
			defer db.Close()
			for i := 0; i < len(keys); i++ {
				db.Put(keys[i], vals[i])
			}
			b.ResetTimer()
			b.ReportAllocs()
			for i := 0; i < len(keys); i++ {
				db.Get(keys[i])
			}
		}
		b.Run("ReadSorted", func(b *testing.B) {
			benchRead(b, sKeys, sVals)
		})
		b.Run("ReadRandom", func(b *testing.B) {
			benchRead(b, keys, vals)
		})
	})
	b.Run("Iteration", func(b *testing.B) {
		benchIteration := func(b *testing.B, keys, vals [][]byte) {
			db := New()
			defer db.Close()
			for i := 0; i < len(keys); i++ {
				db.Put(keys[i], vals[i])
			}
			b.ResetTimer()
			b.ReportAllocs()
			it := db.NewIterator(nil, nil)
			for it.Next() {
			}
			it.Release()
		}
		b.Run("IterationSorted", func(b *testing.B) {
			benchIteration(b, sKeys, sVals)
		})
		b.Run("IterationRandom", func(b *testing.B) {
			benchIteration(b, keys, vals)
		})
	})
	b.Run("BatchWrite", func(b *testing.B) {
		benchBatchWrite := func(b *testing.B, keys, vals [][]byte) {
			b.ResetTimer()
			b.ReportAllocs()
			db := New()
			defer db.Close()
			batch := db.NewBatch()
			for i := 0; i < len(keys); i++ {
				batch.Put(keys[i], vals[i])
			}
			batch.Write()
		}
		b.Run("BenchWriteSorted", func(b *testing.B) {
			benchBatchWrite(b, sKeys, sVals)
		})
		b.Run("BenchWriteRandom", func(b *testing.B) {
			benchBatchWrite(b, keys, vals)
		})
	})
}

func iterateKeys(it ethdb.Iterator) []string {
	keys := []string{}
	for it.Next() {
@@ -386,3 +482,25 @@ func iterateKeys(it ethdb.Iterator) []string {
	it.Release()
	return keys
}

// randBytes generates a random blob of data with the given length.
func randBytes(len int) []byte {
	buf := make([]byte, len)
	if n, err := rand.Read(buf); n != len || err != nil {
		panic(err)
	}
	return buf
}

func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) {
	var keys [][]byte
	var vals [][]byte
	for i := 0; i < size; i += 1 {
		keys = append(keys, randBytes(ksize))
		vals = append(vals, randBytes(vsize))
	}
	if order {
		sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
	}
	return keys, vals
}
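
For orientation, here is a minimal sketch of how a backend hooks into the new suite, using go-ethereum's in-memory ethdb/memorydb store. This driver is not part of the commit; the BenchmarkMemoryDB name and the external test package are illustrative only, mirroring the leveldb and pebble drivers below.

// Sketch only: an in-memory driver for BenchDatabaseSuite, assuming the
// go-ethereum ethdb/memorydb package.
package memorydb_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/dbtest"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func BenchmarkMemoryDB(b *testing.B) {
	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
		// memorydb.New returns a fresh map-backed store satisfying ethdb.KeyValueStore.
		return memorydb.New()
	})
}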


@@ -38,3 +38,15 @@ func TestLevelDB(t *testing.T) {
		})
	})
}

func BenchmarkLevelDB(b *testing.B) {
	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
		db, err := leveldb.Open(storage.NewMemStorage(), nil)
		if err != nil {
			b.Fatal(err)
		}
		return &Database{
			db: db,
		}
	})
}


@@ -272,7 +272,9 @@ func (d *Database) NewBatch() ethdb.Batch {
}

// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
-// TODO can't do this with pebble. Batches are allocated in a pool so maybe this doesn't matter?
+// It's not supported by pebble, but pebble has a better memory allocation strategy
+// which turns out to be a lot faster than leveldb, so it's performant enough to
+// construct the batch object without any pre-allocated space.
func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
	return &batch{
		b: d.db.NewBatch(),
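
The rationale in that comment can be seen from how pebble batches are used. Below is a rough standalone sketch against the github.com/cockroachdb/pebble API (not code from this commit): a batch starts empty, buffers entries as they are added, and is flushed in a single commit, so no size hint is needed up front.

// Sketch only: pebble batches grow on demand, with no pre-allocated buffer.
package main

import (
	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
)

func main() {
	db, err := pebble.Open("", &pebble.Options{FS: vfs.NewMem()})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	batch := db.NewBatch()                         // no size hint needed
	batch.Set([]byte("key"), []byte("value"), nil) // buffered in memory
	if err := batch.Commit(pebble.Sync); err != nil {
		panic(err) // flushed to the store in a single write
	}
}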


@@ -42,3 +42,17 @@ func TestPebbleDB(t *testing.T) {
		})
	})
}

func BenchmarkPebbleDB(b *testing.B) {
	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
		db, err := pebble.Open("", &pebble.Options{
			FS: vfs.NewMem(),
		})
		if err != nil {
			b.Fatal(err)
		}
		return &Database{
			db: db,
		}
	})
}
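
With both drivers in place, the benchmarks can be invoked through the standard Go tooling, for example (package paths assume go-ethereum's usual layout, with the drivers living in ethdb/leveldb and ethdb/pebble):

go test -run '^$' -bench . -benchmem ./ethdb/leveldb ./ethdb/pebble

The -run '^$' pattern skips the regular tests, and -benchmem reports allocation counts alongside the timings.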