refactor: remove db (#13370)

* remove db

* Update CHANGELOG.md
This commit is contained in:
Jacob Gadikian 2022-09-23 20:08:26 +07:00 committed by GitHub
parent 8dd708d9cc
commit 6cbb587e18
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 1 additions and 3867 deletions

View File

@ -112,6 +112,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking Changes
* !(storev2alpha1) [#13370](https://github.com/cosmos/cosmos-sdk/pull/13370) remove storev2alpha1
* (context) [#13063](https://github.com/cosmos/cosmos-sdk/pull/13063) Update `Context#CacheContext` to automatically emit all events on the parent context's `EventManager`.
* (x/bank) [#12706](https://github.com/cosmos/cosmos-sdk/pull/12706) Removed the `testutil` package from the `x/bank/client` package.
* (simapp) [#12747](https://github.com/cosmos/cosmos-sdk/pull/12747) Remove `simapp.MakeTestEncodingConfig`. Please use `moduletestutil.MakeTestEncodingConfig` (`types/module/testutil`) in tests instead.

View File

@ -1,72 +0,0 @@
# Key-Value Database
Databases supporting mappings of arbitrary byte sequences.
## Interfaces
The database interface types consist of objects to encapsulate the singular connection to the DB, transactions being made to it, historical version state, and iteration.
### `Connection`
This interface represents a connection to a versioned key-value database. All versioning operations are performed using methods on this type.
* The `Versions` method returns a `VersionSet` which represents an immutable view of the version history at the current state.
* Version history is modified via the `{Save,Delete}Version` methods.
* Operations on version history do not modify any database contents.
### `DBReader`, `DBWriter`, and `DBReadWriter`
These types represent transactions on the database contents. Their methods provide CRUD operations as well as iteration.
* Calling `Commit` on a writeable transaction flushes its operations to the source DB.
* All open transactions must be closed with `Discard` or `Commit` before a new version can be saved on the source DB.
* The maximum number of safely concurrent transactions is dependent on the backend implementation.
* A single transaction object is not safe for concurrent use.
* Write conflicts on concurrent transactions will cause an error at commit time (optimistic concurrency control).
#### `Iterator`
* An iterator is invalidated by any writes within its `Domain` to the source transaction while it is open.
* An iterator must call `Close` before its source transaction is closed.
### `VersionSet`
This represents a self-contained and immutable view of a database's version history state. It is therefore safe to retain and concurrently access any instance of this object.
## Implementations
### In-memory DB
The in-memory DB in the `db/memdb` package cannot be persisted to disk. It is implemented using the Google [btree](https://pkg.go.dev/github.com/google/btree) library.
* This currently does not perform write conflict detection, so it only supports a single open write-transaction at a time. Multiple and concurrent read-transactions are supported.
### BadgerDB
A [BadgerDB](https://pkg.go.dev/github.com/dgraph-io/badger/v3)-based backend. Internally, this uses BadgerDB's ["managed" mode](https://pkg.go.dev/github.com/dgraph-io/badger/v3#OpenManaged) for version management.
Note that Badger only recognizes write conflicts for rows that are read _after_ a conflicting transaction was opened. In other words, the following will raise an error:
```go
tx1, tx2 := db.Writer(), db.ReadWriter()
key := []byte("key")
tx2.Get(key)
tx1.Set(key, []byte("a"))
tx2.Set(key, []byte("b"))
tx1.Commit() // ok
err := tx2.Commit() // err is non-nil
```
But this will not:
```go
tx1, tx2 := db.Writer(), db.ReadWriter()
key := []byte("key")
tx1.Set(key, []byte("a"))
tx2.Set(key, []byte("b"))
tx1.Commit() // ok
tx2.Commit() // ok
```
### RocksDB
A [RocksDB](https://github.com/facebook/rocksdb)-based backend. Internally this uses [`OptimisticTransactionDB`](https://github.com/facebook/rocksdb/wiki/Transactions#optimistictransactiondb) to allow concurrent transactions with write conflict detection. Historical versioning is internally implemented with [Checkpoints](https://github.com/facebook/rocksdb/wiki/Checkpoints).

View File

@ -1,23 +0,0 @@
package db
// readerRWAdapter wraps a read-only Reader so it can satisfy the ReadWriter
// interface; all mutating methods fail with ErrReadOnly.
type readerRWAdapter struct{ Reader }

// ReaderAsReadWriter returns a ReadWriter that forwards to a reader and errors if writes are
// attempted. Can be used to pass a Reader when a ReadWriter is expected
// but no writes will actually occur.
func ReaderAsReadWriter(r Reader) ReadWriter {
	return readerRWAdapter{r}
}

// Set always fails with ErrReadOnly.
func (readerRWAdapter) Set([]byte, []byte) error {
	return ErrReadOnly
}

// Delete always fails with ErrReadOnly.
func (readerRWAdapter) Delete([]byte) error {
	return ErrReadOnly
}

// Commit discards the wrapped reader; there are never any writes to flush.
func (rw readerRWAdapter) Commit() error {
	rw.Discard()
	return nil
}

View File

@ -1,518 +0,0 @@
package badgerdb
import (
"bytes"
"context"
"encoding/csv"
"errors"
"os"
"path/filepath"
"strconv"
"sync"
"sync/atomic"
"github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/db/internal"
"github.com/dgraph-io/badger/v3"
bpb "github.com/dgraph-io/badger/v3/pb"
"github.com/dgraph-io/ristretto/z"
)
// versionsFilename is the CSV file, stored alongside the Badger data,
// that records saved versions and their commit timestamps.
var versionsFilename = "versions.csv"

// Compile-time interface conformance checks.
var (
	_ db.Connection = (*BadgerDB)(nil)
	_ db.Reader     = (*badgerTxn)(nil)
	_ db.Writer     = (*badgerWriter)(nil)
	_ db.ReadWriter = (*badgerWriter)(nil)
)

// BadgerDB is a connection to a BadgerDB key-value database.
type BadgerDB struct {
	db   *badger.DB
	vmgr *versionManager // version metadata; guarded by mtx
	mtx  sync.RWMutex
	// openWriters counts live write transactions; accessed atomically.
	openWriters int32
}

// badgerTxn is a read transaction pinned to a single commit timestamp.
type badgerTxn struct {
	txn *badger.Txn
	db  *BadgerDB
}

// badgerWriter is a read-write transaction; discarded records whether it has
// already been committed or discarded, so redundant Discards are harmless.
type badgerWriter struct {
	badgerTxn
	discarded bool
}

// badgerIterator adapts a badger.Iterator to the db.Iterator interface.
type badgerIterator struct {
	reverse    bool
	start, end []byte
	iter       *badger.Iterator
	lastErr    error
	// Whether iterator has been advanced to the first element (is fully initialized)
	primed bool
}

// Map our versions to Badger timestamps.
//
// A badger Txn's commit timestamp must be strictly greater than a record's "last-read"
// timestamp in order to detect conflicts, and a Txn must be read at a timestamp after last
// commit to see current state. So we must use commit increments that are more
// granular than our version interval, and map versions to the corresponding timestamp.
type versionManager struct {
	*db.VersionManager
	vmap   map[uint64]uint64 // version -> commit timestamp
	lastTs uint64            // latest commit timestamp
}
// NewDB creates or loads a BadgerDB key-value database inside the given directory.
// If dir does not exist, it will be created.
func NewDB(dir string) (*BadgerDB, error) {
	// Badger has no notion of database names, so the directory alone
	// identifies the database; ensure it exists first.
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return nil, err
	}
	opts := badger.DefaultOptions(dir)
	// Explicit Sync methods are provided, so skip syncing on every write.
	opts.SyncWrites = false
	// Badger's default logger is too chatty; silence it.
	opts.Logger = nil
	return NewDBWithOptions(opts)
}
// NewDBWithOptions creates a BadgerDB key-value database with the specified Options
// (https://pkg.go.dev/github.com/dgraph-io/badger/v3#Options)
func NewDBWithOptions(opts badger.Options) (*BadgerDB, error) {
	bdb, err := badger.OpenManaged(opts)
	if err != nil {
		return nil, err
	}
	// Restore the saved version history from the sidecar metadata file.
	mgr, err := readVersionsFile(filepath.Join(opts.Dir, versionsFilename))
	if err != nil {
		return nil, err
	}
	conn := &BadgerDB{db: bdb, vmgr: mgr}
	return conn, nil
}
// readVersionsFile loads the metadata CSV file containing valid versions,
// reconstructing a versionManager from its (version, timestamp) rows.
// The file is created empty if it does not yet exist (fresh database).
func readVersionsFile(path string) (*versionManager, error) {
	file, err := os.OpenFile(path, os.O_RDONLY|os.O_CREATE, 0o644)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	r := csv.NewReader(file)
	// Each row is exactly: <version>,<commit timestamp>
	r.FieldsPerRecord = 2
	rows, err := r.ReadAll()
	if err != nil {
		return nil, err
	}
	var (
		versions []uint64
		lastTs   uint64
	)
	vmap := map[uint64]uint64{}
	for _, row := range rows {
		version, err := strconv.ParseUint(row[0], 10, 64)
		if err != nil {
			return nil, err
		}
		ts, err := strconv.ParseUint(row[1], 10, 64)
		if err != nil {
			return nil, err
		}
		if version == 0 { // 0 maps to the latest timestamp
			lastTs = ts
		}
		// NOTE(review): the sentinel row 0 is also appended to versions and
		// vmap here; presumably db.NewVersionManager ignores version 0 —
		// confirm, otherwise reloads would register a spurious version.
		versions = append(versions, version)
		vmap[version] = ts
	}
	vmgr := db.NewVersionManager(versions)
	return &versionManager{
		VersionManager: vmgr,
		vmap:           vmap,
		lastTs:         lastTs,
	}, nil
}
// writeVersionsFile persists version metadata as a two-column CSV of
// (version, commit timestamp) rows. The sentinel row "0" records the latest
// commit timestamp so lastTs can be restored on reload.
func writeVersionsFile(vm *versionManager, path string) error {
	// O_TRUNC is required: without it, rewriting a shorter version history
	// over a longer existing file would leave stale trailing rows behind,
	// corrupting the CSV on the next load.
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	defer file.Close()
	w := csv.NewWriter(file)
	rows := [][]string{
		{"0", strconv.FormatUint(vm.lastTs, 10)},
	}
	for it := vm.Iterator(); it.Next(); {
		version := it.Value()
		ts, ok := vm.vmap[version]
		if !ok {
			// Invariant: every saved version has a mapped timestamp.
			panic("version not mapped to ts")
		}
		rows = append(rows, []string{
			strconv.FormatUint(version, 10),
			strconv.FormatUint(ts, 10),
		})
	}
	// WriteAll flushes and reports any underlying write error.
	return w.WriteAll(rows)
}
// Reader implements Connection, opening a read-only transaction pinned to the
// current commit timestamp.
func (b *BadgerDB) Reader() db.Reader {
	b.mtx.RLock()
	readTs := b.vmgr.lastTs
	b.mtx.RUnlock()
	txn := b.db.NewTransactionAt(readTs, false)
	return &badgerTxn{txn: txn, db: b}
}
// ReaderAt implements Connection, opening a read-only transaction pinned to
// the commit timestamp of the given saved version. Returns
// ErrVersionDoesNotExist when the version is not in the history.
func (b *BadgerDB) ReaderAt(version uint64) (db.Reader, error) {
	b.mtx.RLock()
	defer b.mtx.RUnlock()
	if ts, ok := b.vmgr.versionTs(version); ok {
		return &badgerTxn{txn: b.db.NewTransactionAt(ts, false), db: b}, nil
	}
	return nil, db.ErrVersionDoesNotExist
}
// ReadWriter implements Connection, opening a writeable transaction at the
// current commit timestamp and registering it in the open-writer count.
func (b *BadgerDB) ReadWriter() db.ReadWriter {
	atomic.AddInt32(&b.openWriters, 1)
	b.mtx.RLock()
	readTs := b.vmgr.lastTs
	b.mtx.RUnlock()
	wtx := badgerTxn{txn: b.db.NewTransactionAt(readTs, true), db: b}
	return &badgerWriter{wtx, false}
}
// Writer implements Connection. A full ReadWriter transaction is returned
// because Badger's lighter WriteBatch cannot detect write conflicts.
func (b *BadgerDB) Writer() db.Writer {
	// Badger has a WriteBatch, but it doesn't support conflict detection
	return b.ReadWriter()
}
// Close implements Connection. It persists the version metadata to disk, then
// closes the underlying Badger database, reporting errors from both steps.
func (b *BadgerDB) Close() error {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	// The metadata-write error was previously dropped; a failed write would
	// silently lose the version history on the next load.
	err := writeVersionsFile(b.vmgr, filepath.Join(b.db.Opts().Dir, versionsFilename))
	return dbutil.CombineErrors(err, b.db.Close(), "closing Badger DB also failed")
}
// Versions implements Connection.
// Returns a VersionSet that is valid until the next call to SaveVersion or DeleteVersion.
func (b *BadgerDB) Versions() (db.VersionSet, error) {
	b.mtx.RLock()
	defer b.mtx.RUnlock()
	// Returning the manager directly is safe because save/DeleteVersion
	// replace b.vmgr with a copy instead of mutating it in place.
	return b.vmgr, nil
}
// save records the target version (or allocates the next version when target
// is 0), failing with ErrOpenTransactions while any writer is still open.
func (b *BadgerDB) save(target uint64) (uint64, error) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	if b.openWriters > 0 {
		return 0, db.ErrOpenTransactions
	}
	// Copy-on-write so previously returned VersionSets remain immutable.
	next := b.vmgr.Copy()
	b.vmgr = next
	return next.Save(target)
}
// SaveNextVersion implements Connection.
func (b *BadgerDB) SaveNextVersion() (uint64, error) {
	// Target 0 asks save to allocate the next sequential version ID.
	return b.save(0)
}

// SaveVersion implements Connection.
func (b *BadgerDB) SaveVersion(target uint64) error {
	// 0 is reserved as the "next version" sentinel and cannot be saved.
	if target == 0 {
		return db.ErrInvalidVersion
	}
	_, err := b.save(target)
	return err
}
// DeleteVersion implements Connection, removing a saved version from the
// history. The manager is copied so earlier VersionSets stay valid.
func (b *BadgerDB) DeleteVersion(target uint64) error {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	if !b.vmgr.Exists(target) {
		return db.ErrVersionDoesNotExist
	}
	next := b.vmgr.Copy()
	next.Delete(target)
	b.vmgr = next
	return nil
}
// Revert implements Connection, discarding all writes made since the last
// saved version. Because Badger cannot roll back committed data, every key
// changed after the target timestamp is rewritten (or deleted) to its value
// at the target version using the Stream API. Fails with ErrOpenTransactions
// while any writer is open.
func (b *BadgerDB) Revert() error {
	b.mtx.RLock()
	defer b.mtx.RUnlock()
	if b.openWriters > 0 {
		return db.ErrOpenTransactions
	}

	// Revert from latest commit timestamp to last "saved" timestamp
	// if no versions exist, use 0 as it precedes any possible commit timestamp
	var target uint64
	last := b.vmgr.Last()
	if last == 0 {
		target = 0
	} else {
		var has bool
		if target, has = b.vmgr.versionTs(last); !has {
			return errors.New("bad version history")
		}
	}
	lastTs := b.vmgr.lastTs
	if target == lastTs {
		// Nothing committed since the last saved version.
		return nil
	}

	// Badger provides no way to rollback committed data, so we undo all changes
	// since the target version using the Stream API
	stream := b.db.NewStreamAt(lastTs)
	// Skips unchanged keys
	stream.ChooseKey = func(item *badger.Item) bool { return item.Version() > target }
	// Scans for value at target version
	stream.KeyToList = func(key []byte, itr *badger.Iterator) (*bpb.KVList, error) {
		kv := bpb.KV{Key: key}
		// advance down to <= target version
		itr.Next() // we have at least one newer version
		for itr.Valid() && bytes.Equal(key, itr.Item().Key()) && itr.Item().Version() > target {
			itr.Next()
		}
		if itr.Valid() && bytes.Equal(key, itr.Item().Key()) && !itr.Item().IsDeletedOrExpired() {
			var err error
			kv.Value, err = itr.Item().ValueCopy(nil)
			if err != nil {
				return nil, err
			}
		}
		// A kv with nil Value signals "key absent at target" to Send below.
		return &bpb.KVList{Kv: []*bpb.KV{&kv}}, nil
	}
	txn := b.db.NewTransactionAt(lastTs, true)
	defer txn.Discard()
	stream.Send = func(buf *z.Buffer) error {
		kvl, err := badger.BufferToKVList(buf)
		if err != nil {
			return err
		}
		// nil Value indicates a deleted entry
		for _, kv := range kvl.Kv {
			if kv.Value == nil {
				err = txn.Delete(kv.Key)
				if err != nil {
					return err
				}
			} else {
				err = txn.Set(kv.Key, kv.Value)
				if err != nil {
					return err
				}
			}
		}
		return nil
	}
	err := stream.Orchestrate(context.Background())
	if err != nil {
		return err
	}
	// Commit the restoring writes at the current (latest) timestamp.
	return txn.CommitAt(lastTs, nil)
}
// Stats implements Connection; no statistics are reported for this backend.
func (b *BadgerDB) Stats() map[string]string { return nil }
// Get implements db.Reader. A missing key yields (nil, nil); a key stored
// with an empty value yields a non-nil empty slice so the two cases are
// distinguishable.
func (tx *badgerTxn) Get(key []byte) ([]byte, error) {
	if len(key) == 0 {
		return nil, db.ErrKeyEmpty
	}
	item, err := tx.txn.Get(key)
	// errors.Is instead of == so wrapped not-found errors are also handled.
	if errors.Is(err, badger.ErrKeyNotFound) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	val, err := item.ValueCopy(nil)
	if err == nil && val == nil {
		// Normalize: a present key never returns a nil value.
		val = []byte{}
	}
	return val, err
}
// Has implements db.Reader, reporting whether key exists without copying its
// value.
func (tx *badgerTxn) Has(key []byte) (bool, error) {
	if len(key) == 0 {
		return false, db.ErrKeyEmpty
	}
	_, err := tx.txn.Get(key)
	switch {
	// errors.Is instead of == so wrapped not-found errors are also handled.
	case errors.Is(err, badger.ErrKeyNotFound):
		return false, nil
	case err != nil:
		return false, err
	}
	return true, nil
}
// Set implements db.Writer after validating the key/value pair (empty keys
// and nil values are rejected by dbutil.ValidateKv).
func (tx *badgerWriter) Set(key, value []byte) error {
	if err := dbutil.ValidateKv(key, value); err != nil {
		return err
	}
	return tx.txn.Set(key, value)
}

// Delete implements db.Writer; only the empty key is rejected.
func (tx *badgerWriter) Delete(key []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return tx.txn.Delete(key)
}
// Commit implements db.Writer, flushing this transaction at the connection's
// current commit timestamp. The transaction is always released: the deferred
// Discard runs even on success and its error is combined with the commit
// error.
func (tx *badgerWriter) Commit() (err error) {
	if tx.discarded {
		return errors.New("transaction has been discarded")
	}
	defer func() { err = dbutil.CombineErrors(err, tx.Discard(), "Discard also failed") }()
	// Commit to the current commit timestamp, after ensuring it is > ReadTs
	// NOTE(review): updateCommitTs mutates vmgr.lastTs while holding only an
	// RLock here; confirm concurrent commits cannot race on this update.
	tx.db.mtx.RLock()
	tx.db.vmgr.updateCommitTs(tx.txn.ReadTs())
	ts := tx.db.vmgr.lastTs
	tx.db.mtx.RUnlock()
	err = tx.txn.CommitAt(ts, nil)
	return
}
// Discard implements db.Reader, releasing the underlying Badger transaction.
// Safe to call more than once.
func (tx *badgerTxn) Discard() error {
	tx.txn.Discard()
	return nil
}

// Discard implements db.Writer, decrementing the connection's open-writer
// count exactly once no matter how many times it is called.
func (tx *badgerWriter) Discard() error {
	if !tx.discarded {
		defer atomic.AddInt32(&tx.db.openWriters, -1)
		tx.discarded = true
	}
	return tx.badgerTxn.Discard()
}
// iteratorOpts builds a badgerIterator positioned at its first candidate
// entry. For reverse iteration the caller passes its bounds swapped, so
// `start` here is the scan origin. Explicitly empty (non-nil, zero-length)
// bounds are rejected; nil means unbounded.
func (tx *badgerTxn) iteratorOpts(start, end []byte, opts badger.IteratorOptions) (*badgerIterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	iter := tx.txn.NewIterator(opts)
	iter.Rewind()
	iter.Seek(start)
	if opts.Reverse && iter.Valid() && bytes.Equal(iter.Item().Key(), start) {
		// If we're going in reverse, our starting point was "end", which is exclusive.
		iter.Next()
	}
	return &badgerIterator{
		reverse: opts.Reverse,
		start:   start,
		end:     end,
		iter:    iter,
		primed:  false,
	}, nil
}
// Iterator implements db.Reader, iterating keys in [start, end) ascending.
func (tx *badgerTxn) Iterator(start, end []byte) (db.Iterator, error) {
	opts := badger.DefaultIteratorOptions
	return tx.iteratorOpts(start, end, opts)
}

// ReverseIterator implements db.Reader, iterating descending. The bounds are
// passed to iteratorOpts swapped so the scan begins at `end` (exclusive).
func (tx *badgerTxn) ReverseIterator(start, end []byte) (db.Iterator, error) {
	opts := badger.DefaultIteratorOptions
	opts.Reverse = true
	return tx.iteratorOpts(end, start, opts)
}
// Close releases the underlying Badger iterator.
func (i *badgerIterator) Close() error {
	i.iter.Close()
	return nil
}

// Domain returns the bounds this iterator was constructed with.
// NOTE(review): for reverse iterators these are the swapped (end, start)
// arguments from ReverseIterator — confirm callers expect that order.
func (i *badgerIterator) Domain() (start, end []byte) { return i.start, i.end }

// Error returns the last error encountered while copying values, if any.
func (i *badgerIterator) Error() error { return i.lastErr }

// Next advances the iterator, returning false once it is exhausted. The
// first call only primes it (the constructor already seeks to the first
// entry), so the iterator logically starts before its first element.
func (i *badgerIterator) Next() bool {
	if !i.primed {
		i.primed = true
	} else {
		i.iter.Next()
	}
	return i.Valid()
}
// Valid reports whether the iterator is positioned on an entry inside its
// bounds. Forward iteration treats i.end as exclusive (key >= end is out of
// range); reverse iteration treats it as an inclusive lower bound (only
// key < end is out of range).
func (i *badgerIterator) Valid() bool {
	if !i.iter.Valid() {
		return false
	}
	if len(i.end) > 0 {
		key := i.iter.Item().Key()
		if c := bytes.Compare(key, i.end); (!i.reverse && c >= 0) || (i.reverse && c < 0) {
			// We're at the end key, or past the end.
			return false
		}
	}
	return true
}
// Key returns a copy of the current key; panics if the iterator is invalid.
func (i *badgerIterator) Key() []byte {
	if !i.Valid() {
		panic("iterator is invalid")
	}
	return i.iter.Item().KeyCopy(nil)
}

// Value returns a copy of the current value; panics if the iterator is
// invalid. Copy errors are stashed for later retrieval via Error().
func (i *badgerIterator) Value() []byte {
	if !i.Valid() {
		panic("iterator is invalid")
	}
	val, err := i.iter.Item().ValueCopy(nil)
	if err != nil {
		i.lastErr = err
	}
	return val
}
// versionTs looks up the Badger commit timestamp recorded for a saved version.
func (vm *versionManager) versionTs(ver uint64) (uint64, bool) {
	ts, has := vm.vmap[ver]
	return ts, has
}

// updateCommitTs increments the lastTs if equal to readts.
func (vm *versionManager) updateCommitTs(readts uint64) {
	// NOTE(review): this mutation happens during Commit while the caller
	// holds only an RLock on the connection mutex — confirm concurrent
	// commits cannot race on lastTs.
	if vm.lastTs == readts {
		vm.lastTs++
	}
}

// Atomically accesses the last commit timestamp used as a version marker.
// NOTE(review): other accesses to lastTs (Reader, Commit, updateCommitTs)
// are non-atomic under the mutex; the mixed disciplines should be reconciled.
func (vm *versionManager) lastCommitTs() uint64 {
	return atomic.LoadUint64(&vm.lastTs)
}
// Copy returns a deep copy of the version manager, including its
// version-to-timestamp map.
func (vm *versionManager) Copy() *versionManager {
	mapping := make(map[uint64]uint64, len(vm.vmap))
	for version, ts := range vm.vmap {
		mapping[version] = ts
	}
	return &versionManager{
		VersionManager: vm.VersionManager.Copy(),
		vmap:           mapping,
		lastTs:         vm.lastCommitTs(),
	}
}
// Save records target (or the next version when target is 0) in the embedded
// manager and maps the new version ID to the current commit timestamp.
func (vm *versionManager) Save(target uint64) (uint64, error) {
	id, err := vm.VersionManager.Save(target)
	if err != nil {
		return 0, err
	}
	vm.vmap[id] = vm.lastTs // non-atomic, already guarded by the vmgr mutex
	return id, nil
}

// Delete removes a version and its timestamp mapping.
func (vm *versionManager) Delete(target uint64) {
	vm.VersionManager.Delete(target)
	delete(vm.vmap, target)
}

View File

@ -1,41 +0,0 @@
package badgerdb
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/dbtest"
)
// load opens (or reopens) a BadgerDB in dir, failing the test on error.
func load(t *testing.T, dir string) db.Connection {
	d, err := NewDB(dir)
	require.NoError(t, err)
	return d
}

func TestGetSetHasDelete(t *testing.T) {
	dbtest.DoTestGetSetHasDelete(t, load)
}

func TestIterators(t *testing.T) {
	dbtest.DoTestIterators(t, load)
}

// true: Badger supports multiple concurrent writers with conflict detection.
func TestTransactions(t *testing.T) {
	dbtest.DoTestTransactions(t, load, true)
}

func TestVersioning(t *testing.T) {
	dbtest.DoTestVersioning(t, load)
}

func TestRevert(t *testing.T) {
	// Exercise Revert both without and with a DB reload around it.
	dbtest.DoTestRevert(t, load, false)
	dbtest.DoTestRevert(t, load, true)
}

func TestReloadDB(t *testing.T) {
	dbtest.DoTestReloadDB(t, load)
}

View File

@ -1,109 +0,0 @@
package dbtest
import (
"bytes"
"encoding/binary"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// Int64ToBytes encodes i as an 8-byte big-endian slice.
func Int64ToBytes(i int64) []byte {
	var scratch [8]byte
	binary.BigEndian.PutUint64(scratch[:], uint64(i))
	return scratch[:]
}
// BytesToInt64 decodes the first 8 bytes of buf as a big-endian int64.
func BytesToInt64(buf []byte) int64 {
	u := binary.BigEndian.Uint64(buf)
	return int64(u)
}
// BenchmarkRangeScans populates db with dbSize sequential int64 keys, then
// times iterating random contiguous ranges of rangeSize keys.
func BenchmarkRangeScans(b *testing.B, db dbm.ReadWriter, dbSize int64) {
	b.StopTimer()
	rangeSize := int64(10000)
	if dbSize < rangeSize {
		// Fatalf, not Errorf: Errorf does not stop execution, and continuing
		// would pass a non-positive bound to rand.Int63n below and panic.
		b.Fatalf("db size %v cannot be less than range size %v", dbSize, rangeSize)
	}
	for i := int64(0); i < dbSize; i++ {
		bytes := Int64ToBytes(i)
		err := db.Set(bytes, bytes)
		if err != nil {
			// require.NoError() is very expensive (according to profiler), so check manually
			b.Fatal(err) // was b.Fatal(b, err), which logged the benchmark itself
		}
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		start := rand.Int63n(dbSize - rangeSize)
		end := start + rangeSize
		iter, err := db.Iterator(Int64ToBytes(start), Int64ToBytes(end))
		require.NoError(b, err)
		count := 0
		for iter.Next() {
			count++
		}
		iter.Close()
		require.EqualValues(b, rangeSize, count)
	}
}
// BenchmarkRandomReadsWrites times alternating random Set and Get operations
// against db, mirroring each write in an in-memory map to validate reads.
func BenchmarkRandomReadsWrites(b *testing.B, db dbm.ReadWriter) {
	b.StopTimer()
	// create dummy data
	const numItems = int64(1000000)
	internal := map[int64]int64{}
	for i := 0; i < int(numItems); i++ {
		internal[int64(i)] = int64(0)
	}
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		// Write a random key.
		{
			idx := rand.Int63n(numItems)
			internal[idx]++
			val := internal[idx]
			idxBytes := Int64ToBytes(idx)
			valBytes := Int64ToBytes(val)
			err := db.Set(idxBytes, valBytes)
			if err != nil {
				// require.NoError() is very expensive (according to profiler), so check manually
				b.Fatal(err) // was b.Fatal(b, err), which logged the benchmark itself
			}
		}
		// Read a random key and validate against the mirror map.
		{
			idx := rand.Int63n(numItems)
			valExp := internal[idx]
			idxBytes := Int64ToBytes(idx)
			valBytes, err := db.Get(idxBytes)
			if err != nil {
				b.Fatal(err) // was b.Fatal(b, err)
			}
			if valExp == 0 {
				// A key never written to the DB should read back as nil.
				if !bytes.Equal(valBytes, nil) {
					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
					break
				}
			} else {
				if len(valBytes) != 8 {
					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
					break
				}
				valGot := BytesToInt64(valBytes)
				if valExp != valGot {
					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
					break
				}
			}
		}
	}
}

View File

@ -1,588 +0,0 @@
package dbtest
import (
"fmt"
"sort"
"sync"
"testing"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// Loader constructs (or reopens) a db.Connection backed by the given directory.
type Loader func(*testing.T, string) dbm.Connection
// ikey returns a deterministic, zero-padded test key for index i.
func ikey(i int) []byte { return []byte("key-" + fmt.Sprintf("%03d", i)) }

// ival returns a deterministic, zero-padded test value for index i.
func ival(i int) []byte { return []byte("val-" + fmt.Sprintf("%03d", i)) }
// DoTestGetSetHasDelete exercises basic CRUD and isolation behavior of a
// Connection: missing keys read as nil, uncommitted writes are invisible to
// concurrently open readers, empty keys and nil values are rejected, and
// empty (non-nil) values round-trip.
func DoTestGetSetHasDelete(t *testing.T, load Loader) {
	t.Helper()
	db := load(t, t.TempDir())
	var txn dbm.ReadWriter
	view := db.Reader()
	require.NotNil(t, view)

	// A nonexistent key should return nil.
	value, err := view.Get([]byte("a"))
	require.NoError(t, err)
	require.Nil(t, value)
	ok, err := view.Has([]byte("a"))
	require.NoError(t, err)
	require.False(t, ok)

	txn = db.ReadWriter()

	// Set and get a value.
	err = txn.Set([]byte("a"), []byte{0x01})
	require.NoError(t, err)
	ok, err = txn.Has([]byte("a"))
	require.NoError(t, err)
	require.True(t, ok)
	value, err = txn.Get([]byte("a"))
	require.NoError(t, err)
	require.Equal(t, []byte{0x01}, value)

	// New value is not visible from another txn.
	ok, err = view.Has([]byte("a"))
	require.NoError(t, err)
	require.False(t, ok)

	// Deleting a non-existent value is fine.
	err = txn.Delete([]byte("x"))
	require.NoError(t, err)

	// Delete a value.
	err = txn.Delete([]byte("a"))
	require.NoError(t, err)
	value, err = txn.Get([]byte("a"))
	require.NoError(t, err)
	require.Nil(t, value)

	err = txn.Set([]byte("b"), []byte{0x02})
	require.NoError(t, err)

	require.NoError(t, view.Discard())
	require.NoError(t, txn.Commit())
	txn = db.ReadWriter()

	// Verify committed values.
	value, err = txn.Get([]byte("b"))
	require.NoError(t, err)
	require.Equal(t, []byte{0x02}, value)
	ok, err = txn.Has([]byte("a"))
	require.NoError(t, err)
	require.False(t, ok)

	// Setting, getting, and deleting an empty key should error.
	_, err = txn.Get([]byte{})
	require.Equal(t, dbm.ErrKeyEmpty, err)
	_, err = txn.Get(nil)
	require.Equal(t, dbm.ErrKeyEmpty, err)
	_, err = txn.Has([]byte{})
	require.Equal(t, dbm.ErrKeyEmpty, err)
	_, err = txn.Has(nil)
	require.Equal(t, dbm.ErrKeyEmpty, err)
	err = txn.Set([]byte{}, []byte{0x01})
	require.Equal(t, dbm.ErrKeyEmpty, err)
	err = txn.Set(nil, []byte{0x01})
	require.Equal(t, dbm.ErrKeyEmpty, err)
	err = txn.Delete([]byte{})
	require.Equal(t, dbm.ErrKeyEmpty, err)
	err = txn.Delete(nil)
	require.Equal(t, dbm.ErrKeyEmpty, err)

	// Setting a nil value should error, but an empty value is fine.
	err = txn.Set([]byte("x"), nil)
	require.Equal(t, dbm.ErrValueNil, err)
	err = txn.Set([]byte("x"), []byte{})
	require.NoError(t, err)
	value, err = txn.Get([]byte("x"))
	require.NoError(t, err)
	require.Equal(t, []byte{}, value)

	require.NoError(t, txn.Commit())
	require.NoError(t, db.Close())
}
// DoTestIterators verifies forward and reverse iteration over committed data,
// including half-open [start, end) bounds in both directions.
func DoTestIterators(t *testing.T, load Loader) {
	t.Helper()
	db := load(t, t.TempDir())

	// Seed entries whose keys exercise prefix ordering ({0} < {0,0} < {1}).
	type entry struct {
		key []byte
		val string
	}
	entries := []entry{
		{[]byte{0}, "0"},
		{[]byte{0, 0}, "0 0"},
		{[]byte{0, 1}, "0 1"},
		{[]byte{0, 2}, "0 2"},
		{[]byte{1}, "1"},
	}
	txn := db.ReadWriter()
	for _, e := range entries {
		require.NoError(t, txn.Set(e.key, []byte(e.val)))
	}
	require.NoError(t, txn.Commit())

	// testRange drains iter, checking the visited values in order.
	testRange := func(t *testing.T, iter dbm.Iterator, expected []string) {
		i := 0
		for ; iter.Next(); i++ {
			expectedValue := expected[i]
			value := iter.Value()
			require.Equal(t, expectedValue, string(value), "i=%v", i)
		}
		require.Equal(t, len(expected), i)
	}

	type testCase struct {
		start, end []byte
		expected   []string
	}

	view := db.Reader()
	iterCases := []testCase{
		{nil, nil, []string{"0", "0 0", "0 1", "0 2", "1"}},
		{[]byte{0x00}, nil, []string{"0", "0 0", "0 1", "0 2", "1"}},
		{[]byte{0x00}, []byte{0x00, 0x01}, []string{"0", "0 0"}},
		{[]byte{0x00}, []byte{0x01}, []string{"0", "0 0", "0 1", "0 2"}},
		{[]byte{0x00, 0x01}, []byte{0x01}, []string{"0 1", "0 2"}},
		{nil, []byte{0x01}, []string{"0", "0 0", "0 1", "0 2"}},
	}
	for i, tc := range iterCases {
		t.Logf("Iterator case %d: [%v, %v)", i, tc.start, tc.end)
		it, err := view.Iterator(tc.start, tc.end)
		require.NoError(t, err)
		testRange(t, it, tc.expected)
		it.Close()
	}

	reverseCases := []testCase{
		{nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}},
		{[]byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}},
		{[]byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}},
		{[]byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}},
		{[]byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}},
		{nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}},
	}
	for i, tc := range reverseCases {
		t.Logf("ReverseIterator case %d: [%v, %v)", i, tc.start, tc.end)
		it, err := view.ReverseIterator(tc.start, tc.end)
		require.NoError(t, err)
		testRange(t, it, tc.expected)
		it.Close()
	}

	require.NoError(t, view.Discard())
	require.NoError(t, db.Close())
}
// DoTestVersioning verifies saving, reading at, skipping ahead to, and
// deleting versions, and that the reported VersionSet tracks the latest
// saved version.
func DoTestVersioning(t *testing.T, load Loader) {
	t.Helper()
	db := load(t, t.TempDir())
	view := db.Reader()
	require.NotNil(t, view)

	// Write, then read different versions
	txn := db.ReadWriter()
	require.NoError(t, txn.Set([]byte("0"), []byte("a")))
	require.NoError(t, txn.Set([]byte("1"), []byte("b")))
	require.NoError(t, txn.Commit())
	v1, err := db.SaveNextVersion()
	require.NoError(t, err)

	txn = db.ReadWriter()
	require.NoError(t, txn.Set([]byte("0"), []byte("c")))
	require.NoError(t, txn.Delete([]byte("1")))
	require.NoError(t, txn.Set([]byte("2"), []byte("c")))
	require.NoError(t, txn.Commit())
	v2, err := db.SaveNextVersion()
	require.NoError(t, err)

	// Skip to a future version
	v3 := (v2 + 2)
	require.NoError(t, db.SaveVersion(v3))
	// Try to save to a past version
	err = db.SaveVersion(v2)
	require.Error(t, err)

	// Verify existing versions
	versions, err := db.Versions()
	require.NoError(t, err)
	require.Equal(t, 3, versions.Count())
	var all []uint64
	for it := versions.Iterator(); it.Next(); {
		all = append(all, it.Value())
	}
	sort.Slice(all, func(i, j int) bool { return all[i] < all[j] })
	require.Equal(t, []uint64{v1, v2, v3}, all)
	require.Equal(t, v3, versions.Last())

	// Read back v1's state.
	view, err = db.ReaderAt(v1)
	require.NoError(t, err)
	require.NotNil(t, view)
	val, err := view.Get([]byte("0"))
	require.Equal(t, []byte("a"), val)
	require.NoError(t, err)
	val, err = view.Get([]byte("1"))
	require.Equal(t, []byte("b"), val)
	require.NoError(t, err)
	// NOTE(review): the error returned by Has is not checked here or below.
	has, err := view.Has([]byte("2"))
	require.False(t, has)
	require.NoError(t, view.Discard())

	// Read back v2's state.
	view, err = db.ReaderAt(v2)
	require.NoError(t, err)
	require.NotNil(t, view)
	val, err = view.Get([]byte("0"))
	require.Equal(t, []byte("c"), val)
	require.NoError(t, err)
	val, err = view.Get([]byte("2"))
	require.Equal(t, []byte("c"), val)
	require.NoError(t, err)
	has, err = view.Has([]byte("1"))
	require.False(t, has)
	require.NoError(t, view.Discard())

	view, err = db.ReaderAt(versions.Last() + 1) //nolint:staticcheck // we nolint here because we are checking for the absence of an error.
	require.Equal(t, dbm.ErrVersionDoesNotExist, err, "should fail to read a nonexistent version")

	require.NoError(t, db.DeleteVersion(v2), "should delete version v2")
	view, err = db.ReaderAt(v2) //nolint:staticcheck // we nolint here because we are checking for the absence of an error.
	require.Equal(t, dbm.ErrVersionDoesNotExist, err)

	// Ensure latest version is accurate
	prev := v3
	for i := 0; i < 10; i++ {
		w := db.Writer()
		require.NoError(t, w.Set(ikey(i), ival(i)))
		require.NoError(t, w.Commit())
		ver, err := db.SaveNextVersion()
		require.NoError(t, err)
		require.Equal(t, prev+1, ver)
		versions, err := db.Versions()
		require.NoError(t, err)
		require.Equal(t, ver, versions.Last())
		prev = ver
	}

	// Open multiple readers for the same past version
	view, err = db.ReaderAt(v3)
	require.NoError(t, err)
	view2, err := db.ReaderAt(v3)
	require.NoError(t, err)
	require.NoError(t, view.Discard())
	require.NoError(t, view2.Discard())
	require.NoError(t, db.Close())
}
// DoTestTransactions verifies transaction lifecycle rules: uncommitted writes
// are invisible, versions cannot be saved while writers are open, closed
// transactions cannot be reused, and (when multipleWriters is true)
// concurrent writers are supported with write-conflict detection.
func DoTestTransactions(t *testing.T, load Loader, multipleWriters bool) {
	t.Helper()
	db := load(t, t.TempDir())

	// Both methods should work in a DBWriter context
	writerFuncs := []func() dbm.Writer{
		db.Writer,
		func() dbm.Writer { return db.ReadWriter() },
	}
	for _, getWriter := range writerFuncs {
		// Uncommitted records are not saved
		t.Run("no commit", func(t *testing.T) {
			t.Helper()
			view := db.Reader()
			tx := getWriter()
			require.NoError(t, tx.Set([]byte("0"), []byte("a")))
			v, err := view.Get([]byte("0"))
			require.NoError(t, err)
			require.Nil(t, v)
			require.NoError(t, view.Discard())
			require.NoError(t, tx.Discard())
		})

		// Try to commit version with open txns
		t.Run("cannot save with open transactions", func(t *testing.T) {
			t.Helper()
			tx := getWriter()
			require.NoError(t, tx.Set([]byte("0"), []byte("a")))
			_, err := db.SaveNextVersion()
			require.Equal(t, dbm.ErrOpenTransactions, err)
			require.NoError(t, tx.Discard())
		})

		// Try to use a transaction after closing
		t.Run("cannot reuse transaction", func(t *testing.T) {
			t.Helper()
			tx := getWriter()
			require.NoError(t, tx.Commit())
			require.Error(t, tx.Set([]byte("0"), []byte("a")))
			require.NoError(t, tx.Discard()) // redundant discard is fine
			tx = getWriter()
			require.NoError(t, tx.Discard())
			require.Error(t, tx.Set([]byte("0"), []byte("a")))
			require.NoError(t, tx.Discard())
		})

		// Continue only if the backend supports multiple concurrent writers
		if !multipleWriters {
			continue
		}

		// Writing separately to same key causes a conflict
		t.Run("write conflict", func(t *testing.T) {
			t.Helper()
			tx1 := getWriter()
			tx2 := db.ReadWriter()
			// tx2 reads the key first so optimistic concurrency control can
			// detect the conflict when tx1 commits first.
			tx2.Get([]byte("1"))
			require.NoError(t, tx1.Set([]byte("1"), []byte("b")))
			require.NoError(t, tx2.Set([]byte("1"), []byte("c")))
			require.NoError(t, tx1.Commit())
			require.Error(t, tx2.Commit())
		})

		// Writing from concurrent txns
		t.Run("concurrent transactions", func(t *testing.T) {
			t.Helper()
			var wg sync.WaitGroup
			setkv := func(k, v []byte) {
				defer wg.Done()
				tx := getWriter()
				require.NoError(t, tx.Set(k, v))
				require.NoError(t, tx.Commit())
			}
			n := 10
			wg.Add(n)
			for i := 0; i < n; i++ {
				// k/v are passed as arguments, so the loop variable is not
				// captured by the goroutines.
				go setkv(ikey(i), ival(i))
			}
			wg.Wait()
			view := db.Reader()
			v, err := view.Get(ikey(0))
			require.NoError(t, err)
			require.Equal(t, ival(0), v)
			require.NoError(t, view.Discard())
		})
	}

	// Try to reuse a reader txn
	view := db.Reader()
	require.NoError(t, view.Discard())
	_, err := view.Get([]byte("0"))
	require.Error(t, err)
	require.NoError(t, view.Discard()) // redundant discard is fine
	require.NoError(t, db.Close())
}
// Test that Revert works as intended, optionally closing and
// reloading the DB both before and after reverting
func DoTestRevert(t *testing.T, load Loader, reload bool) {
	t.Helper()
	dirname := t.TempDir()
	db := load(t, dirname)
	var txn dbm.Writer

	// initContents commits a base record, then a second batch of writes and
	// deletes, all left unsaved (no version).
	initContents := func() {
		txn = db.Writer()
		require.NoError(t, txn.Set([]byte{2}, []byte{2}))
		require.NoError(t, txn.Commit())

		txn = db.Writer()
		for i := byte(6); i < 10; i++ {
			require.NoError(t, txn.Set([]byte{i}, []byte{i}))
		}
		require.NoError(t, txn.Delete([]byte{2}))
		require.NoError(t, txn.Delete([]byte{3}))
		require.NoError(t, txn.Commit())
	}

	initContents()
	// With no versions saved, Revert must restore an empty DB.
	require.NoError(t, db.Revert())
	view := db.Reader()
	it, err := view.Iterator(nil, nil)
	require.NoError(t, err)
	require.False(t, it.Next()) // db is empty
	require.NoError(t, it.Close())
	require.NoError(t, view.Discard())

	initContents()
	_, err = db.SaveNextVersion()
	require.NoError(t, err)

	// get snapshot of db state
	state := map[string][]byte{}
	view = db.Reader()
	it, err = view.Iterator(nil, nil)
	require.NoError(t, err)
	for it.Next() {
		state[string(it.Key())] = it.Value()
	}
	require.NoError(t, it.Close())
	view.Discard()

	// checkContents asserts the DB contents exactly match the snapshot.
	checkContents := func() {
		view = db.Reader()
		count := 0
		it, err = view.Iterator(nil, nil)
		require.NoError(t, err)
		for it.Next() {
			val, has := state[string(it.Key())]
			require.True(t, has, "key should not be present: %v => %v", it.Key(), it.Value())
			require.Equal(t, val, it.Value())
			count++
		}
		require.NoError(t, it.Close())
		require.Equal(t, len(state), count)
		view.Discard()
	}

	// changeContents commits two batches of unsaved modifications that a
	// subsequent Revert must undo.
	changeContents := func() {
		txn = db.Writer()
		require.NoError(t, txn.Set([]byte{3}, []byte{15}))
		require.NoError(t, txn.Set([]byte{7}, []byte{70}))
		require.NoError(t, txn.Delete([]byte{8}))
		require.NoError(t, txn.Delete([]byte{9}))
		require.NoError(t, txn.Set([]byte{10}, []byte{0}))
		require.NoError(t, txn.Commit())

		txn = db.Writer()
		require.NoError(t, txn.Set([]byte{3}, []byte{30}))
		require.NoError(t, txn.Set([]byte{8}, []byte{8}))
		require.NoError(t, txn.Delete([]byte{9}))
		require.NoError(t, txn.Commit())
	}

	changeContents()
	if reload {
		db.Close()
		db = load(t, dirname)
	}

	txn = db.Writer()
	require.Error(t, db.Revert()) // can't revert with open writers
	txn.Discard()

	require.NoError(t, db.Revert())
	if reload {
		db.Close()
		db = load(t, dirname)
	}
	checkContents()

	// With intermediate versions added & deleted, revert again to v1
	// NOTE(review): errors from Commit/SaveNextVersion/DeleteVersion/Revert
	// below are unchecked — confirm whether that is intentional.
	changeContents()
	v2, _ := db.SaveNextVersion()
	txn = db.Writer()
	require.NoError(t, txn.Delete([]byte{6}))
	require.NoError(t, txn.Set([]byte{8}, []byte{9}))
	require.NoError(t, txn.Set([]byte{11}, []byte{11}))
	txn.Commit()
	v3, _ := db.SaveNextVersion()
	txn = db.Writer()
	require.NoError(t, txn.Set([]byte{12}, []byte{12}))
	txn.Commit()
	db.DeleteVersion(v2)
	db.DeleteVersion(v3)
	db.Revert()
	checkContents()

	require.NoError(t, db.Close())
}
// Tests reloading a saved DB from disk.
func DoTestReloadDB(t *testing.T, load Loader) {
	t.Helper()
	dirname := t.TempDir()
	db := load(t, dirname)
	// Save 10 versions, each committing one new key, and record the
	// version IDs so they can be re-checked after the reload.
	var firstVersions []uint64
	for i := 0; i < 10; i++ {
		txn := db.Writer()
		require.NoError(t, txn.Set(ikey(i), ival(i)))
		require.NoError(t, txn.Commit())
		ver, err := db.SaveNextVersion()
		require.NoError(t, err)
		firstVersions = append(firstVersions, ver)
	}
	txn := db.Writer()
	for i := 0; i < 5; i++ { // overwrite some values
		require.NoError(t, txn.Set(ikey(i), ival(i*10)))
	}
	require.NoError(t, txn.Commit())
	last, err := db.SaveNextVersion()
	require.NoError(t, err)
	// Commit a change to the working (unsaved) version...
	txn = db.Writer()
	require.NoError(t, txn.Set([]byte("working-version"), ival(100)))
	require.NoError(t, txn.Commit())
	// ...and leave one write uncommitted when the DB is closed below, to
	// verify it does not survive the reload.
	txn = db.Writer()
	require.NoError(t, txn.Set([]byte("uncommitted"), ival(200)))
	// Reload and check each saved version
	db.Close()
	db = load(t, dirname)
	// require.True(t, db.Versions().Equal(versions))
	vset, err := db.Versions()
	require.NoError(t, err)
	require.Equal(t, last, vset.Last())
	// Each of the first 10 versions must still contain its original value.
	for i := 0; i < 10; i++ {
		view, err := db.ReaderAt(firstVersions[i])
		require.NoError(t, err)
		val, err := view.Get(ikey(i))
		require.NoError(t, err)
		require.Equal(t, ival(i), val)
		require.NoError(t, view.Discard())
	}
	// The last saved version reflects the overwrites of keys 0-4.
	view, err := db.ReaderAt(last)
	require.NoError(t, err)
	for i := 0; i < 10; i++ {
		v, err := view.Get(ikey(i))
		require.NoError(t, err)
		if i < 5 {
			require.Equal(t, ival(i*10), v)
		} else {
			require.Equal(t, ival(i), v)
		}
	}
	require.NoError(t, view.Discard())
	// Load working version
	view = db.Reader()
	val, err := view.Get([]byte("working-version"))
	require.NoError(t, err)
	require.Equal(t, ival(100), val)
	// The write that was never committed must not be present after reload.
	val, err = view.Get([]byte("uncommitted"))
	require.NoError(t, err)
	require.Nil(t, val)
	require.NoError(t, view.Discard())
	require.NoError(t, db.Close())
}

View File

@ -1,52 +0,0 @@
package dbtest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// AssertNext advances the iterator and requires that the validity reported
// by Next matches expected.
func AssertNext(t *testing.T, itr dbm.Iterator, expected bool) {
	t.Helper()
	actual := itr.Next()
	require.Equal(t, expected, actual)
}
// AssertDomain checks that the iterator reports the expected start and end
// of its key domain.
func AssertDomain(t *testing.T, itr dbm.Iterator, start, end []byte) {
	t.Helper()
	gotStart, gotEnd := itr.Domain()
	assert.Equal(t, start, gotStart, "checkDomain domain start incorrect")
	assert.Equal(t, end, gotEnd, "checkDomain domain end incorrect")
}
// AssertItem checks that the iterator's current entry has exactly the given
// key and value.
func AssertItem(t *testing.T, itr dbm.Iterator, key, value []byte) {
	t.Helper()
	gotKey := itr.Key()
	gotValue := itr.Value()
	assert.Exactly(t, key, gotKey)
	assert.Exactly(t, value, gotValue)
}
// AssertInvalid checks that the iterator is exhausted: Next reports false,
// and both Key and Value panic. The Next check must come first, since it is
// what establishes the invalid state being asserted.
func AssertInvalid(t *testing.T, itr dbm.Iterator) {
	t.Helper()
	AssertNext(t, itr, false)
	AssertKeyPanics(t, itr)
	AssertValuePanics(t, itr)
}
// AssertKeyPanics checks that calling Key on the iterator panics, as it must
// when the iterator is invalid.
func AssertKeyPanics(t *testing.T, itr dbm.Iterator) {
	t.Helper()
	assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}
// AssertValue checks that reading key from db succeeds and yields valueWanted.
func AssertValue(t *testing.T, db dbm.Reader, key, valueWanted []byte) {
	t.Helper()
	valueGot, err := db.Get(key)
	// Stop on a Get error (require, not assert): comparing valueGot after a
	// failed read would only add a second, misleading failure.
	require.NoError(t, err)
	assert.Equal(t, valueWanted, valueGot)
}
// AssertValuePanics checks that calling Value on the iterator panics, as it
// must when the iterator is invalid.
func AssertValuePanics(t *testing.T, itr dbm.Iterator) {
	t.Helper()
	// Include a failure message for consistency with AssertKeyPanics.
	assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
}

View File

@ -1,39 +0,0 @@
go 1.18
module github.com/cosmos/cosmos-sdk/db
require (
// Note: gorocksdb bindings for OptimisticTransactionDB are not merged upstream, so we use a fork
// See https://github.com/tecbot/gorocksdb/pull/216
github.com/cosmos/gorocksdb v1.2.0
github.com/dgraph-io/badger/v3 v3.2103.2
github.com/dgraph-io/ristretto v0.1.0
github.com/google/btree v1.0.1
github.com/stretchr/testify v1.8.0
)
require (
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/flatbuffers v2.0.0+incompatible // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/klauspost/compress v1.15.9 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
go.opencensus.io v0.23.0 // indirect
golang.org/x/net v0.0.0-20220726230323-06994584191e // indirect
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

223
db/go.sum
View File

@ -1,223 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y=
github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8=
github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI=
github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20220726230323-06994584191e h1:wOQNKh1uuDGRnmgF0jDxh7ctgGy/3P4rYWQRVJD4/Yg=
golang.org/x/net v0.0.0-20220726230323-06994584191e/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -1,28 +0,0 @@
package util
import (
"fmt"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// ValidateKv checks that a key/value pair is storable: the key must be
// non-empty and the value must be non-nil.
func ValidateKv(key, value []byte) error {
	switch {
	case len(key) == 0:
		return dbm.ErrKeyEmpty
	case value == nil:
		return dbm.ErrValueNil
	}
	return nil
}
func CombineErrors(ret error, also error, desc string) error {
if also != nil {
if ret != nil {
ret = fmt.Errorf("%w; %s: %v", ret, desc, also)
} else {
ret = also
}
}
return ret
}

View File

@ -1,329 +0,0 @@
package memdb
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/db/internal"
"github.com/google/btree"
)
const (
	// The approximate number of items and children per B-tree node. Tuned with benchmarks.
	bTreeDegree = 32
)

// MemDB is an in-memory database backend using a B-tree for storage.
//
// For performance reasons, all given and returned keys and values are pointers to the in-memory
// database, so modifying them will cause the stored values to be modified as well. All DB methods
// already specify that keys and values should be considered read-only, but this is especially
// important with MemDB.
//
// Versioning is implemented by maintaining references to copy-on-write clones of the backing btree.
//
// Note: Currently, transactions do not detect write conflicts, so multiple writers cannot be
// safely committed to overlapping domains. Because of this, the number of open writers is
// limited to 1.
type MemDB struct {
	btree       *btree.BTree            // Main contents
	mtx         sync.RWMutex            // Guards version history
	saved       map[uint64]*btree.BTree // Past versions
	vmgr        *db.VersionManager      // Mirrors version keys
	openWriters int32                   // Open writers (updated with sync/atomic)
}

// dbTxn is a read transaction over a btree snapshot. A nil btree marks the
// transaction as closed.
type dbTxn struct {
	btree *btree.BTree
	db    *MemDB
}

// dbWriter is a writable transaction over a copy-on-write clone of the main tree.
type dbWriter struct{ dbTxn }

// Compile-time checks that the types satisfy the db interfaces.
var (
	_ db.Connection = (*MemDB)(nil)
	_ db.Reader     = (*dbTxn)(nil)
	_ db.Writer     = (*dbWriter)(nil)
	_ db.ReadWriter = (*dbWriter)(nil)
)

// item is a btree.Item with byte slices as keys and values
type item struct {
	key   []byte
	value []byte
}
// NewDB creates a new in-memory database.
func NewDB() *MemDB {
	memdb := &MemDB{
		btree: btree.New(bTreeDegree),
		saved: map[uint64]*btree.BTree{},
		vmgr:  db.NewVersionManager(nil),
	}
	return memdb
}
// newTxn wraps the given tree in a transaction bound to this database.
func (dbm *MemDB) newTxn(tree *btree.BTree) dbTxn {
	return dbTxn{btree: tree, db: dbm}
}
// Close implements DB.
// Close is a noop since for an in-memory database, we don't have a destination to flush
// contents to nor do we want any data loss on invoking Close().
// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
// Because nothing is released, the database remains usable after Close.
func (dbm *MemDB) Close() error {
	return nil
}
// Versions implements Connection.
// The read lock guards against the version manager being swapped out
// concurrently (see save/DeleteVersion) while it is read.
func (dbm *MemDB) Versions() (db.VersionSet, error) {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	return dbm.vmgr, nil
}
// Reader implements Connection.
// Returns a read transaction over the current (working) contents.
func (dbm *MemDB) Reader() db.Reader {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	txn := dbm.newTxn(dbm.btree)
	return &txn
}
// ReaderAt implements Connection.
// Returns a read transaction over the given saved version, or
// ErrVersionDoesNotExist when no such version has been saved.
func (dbm *MemDB) ReaderAt(version uint64) (db.Reader, error) {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	tree, found := dbm.saved[version]
	if !found {
		return nil, db.ErrVersionDoesNotExist
	}
	txn := dbm.newTxn(tree)
	return &txn, nil
}
// Writer implements Connection.
// A write-only transaction is backed by the same type as a read-write one.
func (dbm *MemDB) Writer() db.Writer {
	return dbm.ReadWriter()
}
// ReadWriter implements Connection.
func (dbm *MemDB) ReadWriter() db.ReadWriter {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	// Count the open writer so save/Revert can refuse to run while any
	// writer is outstanding; decremented in dbWriter.Discard.
	atomic.AddInt32(&dbm.openWriters, 1)
	// Clone creates a copy-on-write extension of the current tree
	return &dbWriter{dbm.newTxn(dbm.btree.Clone())}
}
// save records the current working tree as version target (or as the next
// version when target is 0) and returns the version ID actually saved.
// Returns ErrOpenTransactions when any writer is still open, since its
// eventual commit would not be captured by the saved version.
func (dbm *MemDB) save(target uint64) (uint64, error) {
	dbm.mtx.Lock()
	defer dbm.mtx.Unlock()
	// openWriters is updated with atomic.AddInt32 outside this mutex
	// (ReadWriter / dbWriter.Discard), so it must be read atomically here
	// as well; a plain read is a data race.
	if atomic.LoadInt32(&dbm.openWriters) > 0 {
		return 0, db.ErrOpenTransactions
	}
	// The version manager is replaced wholesale (copy-on-write) rather than
	// mutated, so holders of the previous manager are unaffected.
	newVmgr := dbm.vmgr.Copy()
	target, err := newVmgr.Save(target)
	if err != nil {
		return 0, err
	}
	dbm.saved[target] = dbm.btree
	dbm.vmgr = newVmgr
	return target, nil
}
// SaveNextVersion implements Connection.
// (The original comment said "SaveVersion"; it was swapped with the comment
// on the following method.)
func (dbm *MemDB) SaveNextVersion() (uint64, error) {
	return dbm.save(0)
}
// SaveVersion implements Connection.
// Saving as version 0 is invalid; use SaveNextVersion to auto-assign an ID.
// (The original comment said "SaveNextVersion"; it was swapped with the
// comment on the preceding method.)
func (dbm *MemDB) SaveVersion(target uint64) error {
	if target == 0 {
		return db.ErrInvalidVersion
	}
	_, err := dbm.save(target)
	return err
}
// DeleteVersion implements Connection.
// Removes a saved version's tree and drops it from the version history.
func (dbm *MemDB) DeleteVersion(target uint64) error {
	dbm.mtx.Lock()
	defer dbm.mtx.Unlock()
	_, found := dbm.saved[target]
	if !found {
		return db.ErrVersionDoesNotExist
	}
	delete(dbm.saved, target)
	// Swap in an updated copy of the version manager rather than mutating
	// the one readers may still hold.
	newVmgr := dbm.vmgr.Copy()
	newVmgr.Delete(target)
	dbm.vmgr = newVmgr
	return nil
}
// Revert discards all changes to the working state, restoring the contents
// to the last saved version (or to empty if none has been saved), and drops
// any saved versions newer than it.
// Returns ErrOpenTransactions when a writer is still open.
func (dbm *MemDB) Revert() error {
	// This method mutates dbm.btree and dbm.saved, so it must take the
	// exclusive lock; the previous RLock allowed it to race with concurrent
	// readers of dbm.btree (e.g. Reader) and with other Reverts.
	dbm.mtx.Lock()
	defer dbm.mtx.Unlock()
	// openWriters is written with atomics elsewhere; read it atomically too.
	if atomic.LoadInt32(&dbm.openWriters) > 0 {
		return db.ErrOpenTransactions
	}
	last := dbm.vmgr.Last()
	if last == 0 {
		// No saved versions: revert to an empty tree.
		dbm.btree = btree.New(bTreeDegree)
		return nil
	}
	var has bool
	dbm.btree, has = dbm.saved[last]
	if !has {
		return fmt.Errorf("bad version history: version %v not saved", last)
	}
	// Drop any saved trees newer than the version just restored.
	for ver := range dbm.saved {
		if ver > last {
			delete(dbm.saved, ver)
		}
	}
	return nil
}
// Get implements DBReader.
// Returns (nil, nil) when the key is absent.
func (tx *dbTxn) Get(key []byte) ([]byte, error) {
	if tx.btree == nil {
		return nil, db.ErrTransactionClosed
	}
	if len(key) == 0 {
		return nil, db.ErrKeyEmpty
	}
	found := tx.btree.Get(newKey(key))
	if found == nil {
		return nil, nil
	}
	return found.(*item).value, nil
}
// Has implements DBReader.
func (tx *dbTxn) Has(key []byte) (bool, error) {
	switch {
	case tx.btree == nil:
		return false, db.ErrTransactionClosed
	case len(key) == 0:
		return false, db.ErrKeyEmpty
	}
	return tx.btree.Has(newKey(key)), nil
}
// Set implements DBWriter.
func (tx *dbWriter) Set(key []byte, value []byte) error {
if tx.btree == nil {
return db.ErrTransactionClosed
}
if err := dbutil.ValidateKv(key, value); err != nil {
return err
}
tx.btree.ReplaceOrInsert(newPair(key, value))
return nil
}
// Delete implements DBWriter.
func (tx *dbWriter) Delete(key []byte) error {
if tx.btree == nil {
return db.ErrTransactionClosed
}
if len(key) == 0 {
return db.ErrKeyEmpty
}
tx.btree.Delete(newKey(key))
return nil
}
// Iterator implements DBReader.
// NOTE(review): the original comment claimed a read-lock is held until the
// iterator is closed, but no lock is taken here — the iterator walks this
// transaction's btree snapshot. Confirm intended behavior.
func (tx *dbTxn) Iterator(start, end []byte) (db.Iterator, error) {
	if tx.btree == nil {
		return nil, db.ErrTransactionClosed
	}
	// Empty (non-nil) endpoints are invalid; nil means an open range end.
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	return newMemDBIterator(tx, start, end, false), nil
}

// ReverseIterator implements DBReader.
// NOTE(review): as with Iterator, no database lock is taken here.
func (tx *dbTxn) ReverseIterator(start, end []byte) (db.Iterator, error) {
	if tx.btree == nil {
		return nil, db.ErrTransactionClosed
	}
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	return newMemDBIterator(tx, start, end, true), nil
}

// Commit implements DBWriter: publishes this writer's btree clone as the
// database's current tree, then discards the transaction.
func (tx *dbWriter) Commit() error {
	if tx.btree == nil {
		return db.ErrTransactionClosed
	}
	tx.db.mtx.Lock()
	defer tx.db.mtx.Unlock()
	tx.db.btree = tx.btree
	return tx.Discard()
}

// Discard implements DBReader. Idempotent: safe to call more than once.
func (tx *dbTxn) Discard() error {
	if tx.btree != nil {
		tx.btree = nil
	}
	return nil
}

// Discard implements DBWriter. Decrements the open-writer count exactly once
// (only while the transaction is still open), then discards the inner txn.
func (tx *dbWriter) Discard() error {
	if tx.btree != nil {
		defer atomic.AddInt32(&tx.db.openWriters, -1)
	}
	return tx.dbTxn.Discard()
}
// Print prints the database contents (hex key/value pairs) to stdout.
func (dbm *MemDB) Print() error {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	dbm.btree.Ascend(func(i btree.Item) bool {
		item := i.(*item)
		fmt.Printf("[%X]:\t[%X]\n", item.key, item.value)
		return true
	})
	return nil
}

// Stats implements Connection. Reports the backend type and item count.
func (dbm *MemDB) Stats() map[string]string {
	dbm.mtx.RLock()
	defer dbm.mtx.RUnlock()
	stats := make(map[string]string)
	stats["database.type"] = "memDB"
	stats["database.size"] = fmt.Sprintf("%d", dbm.btree.Len())
	return stats
}

// Less implements btree.Item: items are ordered bytewise by key.
func (i *item) Less(other btree.Item) bool {
	// this considers nil == []byte{}, but that's ok since we handle nil endpoints
	// in iterators specially anyway
	return bytes.Compare(i.key, other.(*item).key) == -1
}

// newKey creates a key-only item, used for lookups and range endpoints.
func newKey(key []byte) *item {
	return &item{key: key}
}

// newPair creates a key/value item for insertion.
func newPair(key, value []byte) *item {
	return &item{key: key, value: value}
}

View File

@ -1,53 +0,0 @@
package memdb
import (
"testing"
"github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/dbtest"
)
// BenchmarkMemDBRangeScans1M benchmarks range scans over ~1M keys.
func BenchmarkMemDBRangeScans1M(b *testing.B) {
	dbm := NewDB()
	defer dbm.Close()
	dbtest.BenchmarkRangeScans(b, dbm.ReadWriter(), int64(1e6))
}

// BenchmarkMemDBRangeScans10M benchmarks range scans over ~10M keys.
func BenchmarkMemDBRangeScans10M(b *testing.B) {
	dbm := NewDB()
	defer dbm.Close()
	dbtest.BenchmarkRangeScans(b, dbm.ReadWriter(), int64(10e6))
}

// BenchmarkMemDBRandomReadsWrites benchmarks mixed random reads/writes.
func BenchmarkMemDBRandomReadsWrites(b *testing.B) {
	dbm := NewDB()
	defer dbm.Close()
	dbtest.BenchmarkRandomReadsWrites(b, dbm.ReadWriter())
}

// load is the shared dbtest loader; memdb ignores the directory argument.
func load(t *testing.T, _ string) db.Connection {
	return NewDB()
}

func TestGetSetHasDelete(t *testing.T) {
	dbtest.DoTestGetSetHasDelete(t, load)
}

func TestIterators(t *testing.T) {
	dbtest.DoTestIterators(t, load)
}

func TestVersioning(t *testing.T) {
	dbtest.DoTestVersioning(t, load)
}

func TestRevert(t *testing.T) {
	dbtest.DoTestRevert(t, load, false)
}

func TestTransactions(t *testing.T) {
	dbtest.DoTestTransactions(t, load, false)
}

View File

@ -1,139 +0,0 @@
package memdb
import (
"bytes"
"context"
"github.com/cosmos/cosmos-sdk/db"
"github.com/google/btree"
)
const (
	// Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered
	// channel causes two context switches per item sent, while buffering allows more work per
	// context switch. Tuned with benchmarks.
	chBufferSize = 64
)

// memDBIterator is a memDB iterator. Items are produced by a traversal
// goroutine (see newMemDBIterator) and consumed over ch; cancel stops the
// producer when the iterator is closed early.
type memDBIterator struct {
	ch     <-chan *item
	cancel context.CancelFunc
	item   *item // current item; nil when exhausted or not yet advanced
	start  []byte
	end    []byte
}

var _ db.Iterator = (*memDBIterator)(nil)
// newMemDBIterator creates a new memDBIterator.
// A visitor is passed to the btree which streams items to the iterator over a channel. Advancing
// the iterator pulls items from the channel, returning execution to the visitor.
// The reverse case needs some special handling, since we use [start, end) while btree uses (start, end]
func newMemDBIterator(tx *dbTxn, start []byte, end []byte, reverse bool) *memDBIterator {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan *item, chBufferSize)
	iter := &memDBIterator{
		ch:     ch,
		cancel: cancel,
		start:  start,
		end:    end,
	}
	go func() {
		// Closing ch signals exhaustion to Next(); Close() drains it.
		defer close(ch)
		// Because we use [start, end) for reverse ranges, while btree uses (start, end], we need
		// the following variables to handle some reverse iteration conditions ourselves.
		var (
			skipEqual     []byte // skip exactly one item equal to this key (the exclusive end)
			abortLessThan []byte // stop once keys drop below this (the inclusive start)
		)
		visitor := func(i btree.Item) bool {
			item := i.(*item)
			if skipEqual != nil && bytes.Equal(item.key, skipEqual) {
				skipEqual = nil
				return true
			}
			if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 {
				return false
			}
			// Either deliver the item or abort if the iterator was closed.
			select {
			case <-ctx.Done():
				return false
			case ch <- item:
				return true
			}
		}
		switch {
		case start == nil && end == nil && !reverse:
			tx.btree.Ascend(visitor)
		case start == nil && end == nil && reverse:
			tx.btree.Descend(visitor)
		case end == nil && !reverse:
			// must handle this specially, since nil is considered less than anything else
			tx.btree.AscendGreaterOrEqual(newKey(start), visitor)
		case !reverse:
			tx.btree.AscendRange(newKey(start), newKey(end), visitor)
		case end == nil:
			// abort after start, since we use [start, end) while btree uses (start, end]
			abortLessThan = start
			tx.btree.Descend(visitor)
		default:
			// skip end and abort after start, since we use [start, end) while btree uses (start, end]
			skipEqual = end
			abortLessThan = start
			tx.btree.DescendLessOrEqual(newKey(end), visitor)
		}
	}()
	return iter
}
// Close implements Iterator. Cancels and drains the producer goroutine so it
// cannot leak, then invalidates the iterator.
func (i *memDBIterator) Close() error {
	i.cancel()
	for range i.ch { // drain channel
	}
	i.item = nil
	return nil
}

// Domain implements Iterator.
func (i *memDBIterator) Domain() ([]byte, []byte) {
	return i.start, i.end
}

// Next implements Iterator. Returns false once the producer channel is closed.
func (i *memDBIterator) Next() bool {
	item, ok := <-i.ch
	switch {
	case ok:
		i.item = item
	default:
		i.item = nil
	}
	return i.item != nil
}

// Error implements Iterator. memDB iteration cannot fail.
func (i *memDBIterator) Error() error {
	return nil
}

// Key implements Iterator. Panics if the iterator is invalid.
func (i *memDBIterator) Key() []byte {
	i.assertIsValid()
	return i.item.key
}

// Value implements Iterator. Panics if the iterator is invalid.
func (i *memDBIterator) Value() []byte {
	i.assertIsValid()
	return i.item.value
}

// assertIsValid panics when the iterator has no current item.
func (i *memDBIterator) assertIsValid() {
	if i.item == nil {
		panic("iterator is invalid")
	}
}

View File

@ -1,211 +0,0 @@
// Prefixed DB reader/writer types let you namespace multiple DBs within a single DB.
package prefix
import (
"github.com/cosmos/cosmos-sdk/db"
)
// Reader is a db.Reader restricted to keys carrying a fixed prefix.
type Reader struct {
	db     db.Reader
	prefix []byte
}

// ReadWriter is a db.ReadWriter restricted to keys carrying a fixed prefix.
type ReadWriter struct {
	db     db.ReadWriter
	prefix []byte
}

// Writer is a db.Writer restricted to keys carrying a fixed prefix.
type Writer struct {
	db     db.Writer
	prefix []byte
}

// Compile-time interface satisfaction checks.
var (
	_ db.Reader     = (*Reader)(nil)
	_ db.ReadWriter = (*ReadWriter)(nil)
	_ db.Writer     = (*Writer)(nil)
)

// NewReader returns a Reader that only has access to the subset of DB keys
// that contain the given prefix.
func NewReader(dbr db.Reader, prefix []byte) Reader {
	return Reader{
		prefix: prefix,
		db:     dbr,
	}
}

// NewReadWriter returns a ReadWriter that only has access to the subset of DB
// keys that contain the given prefix.
func NewReadWriter(dbrw db.ReadWriter, prefix []byte) ReadWriter {
	return ReadWriter{
		prefix: prefix,
		db:     dbrw,
	}
}

// NewWriter returns a Writer that reads/writes only from the subset of DB keys
// that contain the given prefix
func NewWriter(dbw db.Writer, prefix []byte) Writer {
	return Writer{
		prefix: prefix,
		db:     dbw,
	}
}

// prefixed returns a fresh slice holding prefix followed by key.
func prefixed(prefix, key []byte) []byte {
	return append(cp(prefix), key...)
}
// Get implements DBReader: looks up key under this reader's prefix.
func (pdb Reader) Get(key []byte) ([]byte, error) {
	if len(key) == 0 {
		return nil, db.ErrKeyEmpty
	}
	return pdb.db.Get(prefixed(pdb.prefix, key))
}

// Has implements DBReader.
func (pdb Reader) Has(key []byte) (bool, error) {
	if len(key) == 0 {
		return false, db.ErrKeyEmpty
	}
	return pdb.db.Has(prefixed(pdb.prefix, key))
}

// Iterator implements DBReader. A nil end iterates to the end of the prefix
// range (the prefix incremented by one); keys are un-prefixed by the wrapper.
func (pdb Reader) Iterator(start, end []byte) (db.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	var pend []byte
	if end == nil {
		pend = cpIncr(pdb.prefix)
	} else {
		pend = prefixed(pdb.prefix, end)
	}
	itr, err := pdb.db.Iterator(prefixed(pdb.prefix, start), pend)
	if err != nil {
		return nil, err
	}
	return newPrefixIterator(pdb.prefix, start, end, itr), nil
}

// ReverseIterator implements DBReader. Mirrors Iterator but descending.
func (pdb Reader) ReverseIterator(start, end []byte) (db.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	var pend []byte
	if end == nil {
		pend = cpIncr(pdb.prefix)
	} else {
		pend = prefixed(pdb.prefix, end)
	}
	ritr, err := pdb.db.ReverseIterator(prefixed(pdb.prefix, start), pend)
	if err != nil {
		return nil, err
	}
	return newPrefixIterator(pdb.prefix, start, end, ritr), nil
}

// Discard implements DBReader: delegates to the underlying reader.
func (pdb Reader) Discard() error { return pdb.db.Discard() }
// Set implements DBReadWriter: writes value under the prefixed key.
func (pdb ReadWriter) Set(key []byte, value []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return pdb.db.Set(prefixed(pdb.prefix, key), value)
}

// Delete implements DBReadWriter.
func (pdb ReadWriter) Delete(key []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return pdb.db.Delete(prefixed(pdb.prefix, key))
}

// Get implements DBReadWriter: delegates to a prefixed Reader view.
func (pdb ReadWriter) Get(key []byte) ([]byte, error) {
	return NewReader(pdb.db, pdb.prefix).Get(key)
}

// Has implements DBReadWriter.
func (pdb ReadWriter) Has(key []byte) (bool, error) {
	return NewReader(pdb.db, pdb.prefix).Has(key)
}

// Iterator implements DBReadWriter.
func (pdb ReadWriter) Iterator(start, end []byte) (db.Iterator, error) {
	return NewReader(pdb.db, pdb.prefix).Iterator(start, end)
}

// ReverseIterator implements DBReadWriter.
func (pdb ReadWriter) ReverseIterator(start, end []byte) (db.Iterator, error) {
	return NewReader(pdb.db, pdb.prefix).ReverseIterator(start, end)
}

// Commit implements DBReadWriter.
func (pdb ReadWriter) Commit() error { return pdb.db.Commit() }

// Discard implements DBReadWriter.
func (pdb ReadWriter) Discard() error { return pdb.db.Discard() }

// Set implements DBWriter.
func (pdb Writer) Set(key []byte, value []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return pdb.db.Set(prefixed(pdb.prefix, key), value)
}

// Delete implements DBWriter.
func (pdb Writer) Delete(key []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return pdb.db.Delete(prefixed(pdb.prefix, key))
}

// Commit implements DBWriter.
func (pdb Writer) Commit() error { return pdb.db.Commit() }

// Discard implements DBWriter.
func (pdb Writer) Discard() error { return pdb.db.Discard() }
// cp returns a freshly allocated copy of bz.
func cp(bz []byte) []byte {
	out := make([]byte, len(bz))
	copy(out, bz)
	return out
}

// cpIncr treats bz as a big-endian integer and returns a copy incremented
// by one. Returns nil on overflow (e.g. if bz bytes are all 0xFF).
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) []byte {
	if len(bz) == 0 {
		panic("cpIncr expects non-zero bz length")
	}
	out := cp(bz)
	// Zero the trailing run of 0xFF bytes (the carry), then bump the first
	// byte to their left. If the carry runs off the front, it's an overflow.
	i := len(out) - 1
	for ; i >= 0 && out[i] == 0xFF; i-- {
		out[i] = 0x00
	}
	if i < 0 {
		return nil
	}
	out[i]++
	return out
}

View File

@ -1,112 +0,0 @@
package prefix
import (
"bytes"
"fmt"
"github.com/cosmos/cosmos-sdk/db"
)
// IteratePrefix is a convenience function for iterating over a key domain
// restricted by prefix. An empty prefix iterates over the whole key space.
func IteratePrefix(dbr db.Reader, prefix []byte) (db.Iterator, error) {
	// A nil start/end pair means an unbounded iteration.
	var lo, hi []byte
	if len(prefix) > 0 {
		lo = prefix
		hi = cpIncr(prefix)
	}
	return dbr.Iterator(lo, hi)
}
// prefixDBIterator strips the prefix from keys while iterating over a source
// iterator whose keys all carry that prefix.
type prefixDBIterator struct {
	prefix []byte
	start  []byte
	end    []byte
	source db.Iterator
	err    error // sticky error set by valid() on a malformed backend key
}

var _ db.Iterator = (*prefixDBIterator)(nil)

// newPrefixIterator wraps source, exposing keys with prefix removed.
func newPrefixIterator(prefix, start, end []byte, source db.Iterator) *prefixDBIterator {
	return &prefixDBIterator{
		prefix: prefix,
		start:  start,
		end:    end,
		source: source,
	}
}

// Domain implements Iterator. Returns the un-prefixed range endpoints.
func (itr *prefixDBIterator) Domain() (start, end []byte) {
	return itr.start, itr.end
}

// valid reports whether the source's current key carries the expected prefix,
// recording a sticky error if the backend returned a key outside the range.
func (itr *prefixDBIterator) valid() bool {
	if itr.err != nil {
		return false
	}
	key := itr.source.Key()
	if len(key) < len(itr.prefix) || !bytes.Equal(key[:len(itr.prefix)], itr.prefix) {
		itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)",
			key, itr.prefix)
		return false
	}
	return true
}
// Next implements Iterator. Advances the source, stopping when it is
// exhausted or leaves the prefix range; a key exactly equal to the prefix
// (which would expose an empty key) is skipped.
func (itr *prefixDBIterator) Next() bool {
	for itr.source.Next() {
		key := itr.source.Key()
		if !bytes.HasPrefix(key, itr.prefix) {
			return false
		}
		// Empty keys are not allowed, so a stored key that exactly matches
		// the prefix must be skipped rather than surfaced.
		if !bytes.Equal(key, itr.prefix) {
			return true
		}
	}
	return false
}
// Key implements Iterator. Returns the current key with the prefix stripped.
func (itr *prefixDBIterator) Key() []byte {
	itr.assertIsValid()
	key := itr.source.Key()
	return key[len(itr.prefix):] // we have checked the key in valid()
}

// Value implements Iterator.
func (itr *prefixDBIterator) Value() []byte {
	itr.assertIsValid()
	return itr.source.Value()
}

// Error implements Iterator. Source errors take precedence over the wrapper's
// own sticky error.
func (itr *prefixDBIterator) Error() error {
	if err := itr.source.Error(); err != nil {
		return err
	}
	return itr.err
}

// Close implements Iterator.
func (itr *prefixDBIterator) Close() error {
	return itr.source.Close()
}

// assertIsValid panics if the current position is not a valid prefixed key.
func (itr *prefixDBIterator) assertIsValid() {
	if !itr.valid() {
		panic("iterator is invalid")
	}
}

View File

@ -1,157 +0,0 @@
package prefix_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/dbtest"
"github.com/cosmos/cosmos-sdk/db/memdb"
pfx "github.com/cosmos/cosmos-sdk/db/prefix"
)
// fillDBWithStuff seeds dbw with fixture keys: some under the "key" prefix,
// some near-misses ("k", "ke", "kee", "something"), then commits.
func fillDBWithStuff(t *testing.T, dbw db.Writer) {
	// Under "key" prefix
	require.NoError(t, dbw.Set([]byte("key"), []byte("value")))
	require.NoError(t, dbw.Set([]byte("key1"), []byte("value1")))
	require.NoError(t, dbw.Set([]byte("key2"), []byte("value2")))
	require.NoError(t, dbw.Set([]byte("key3"), []byte("value3")))
	require.NoError(t, dbw.Set([]byte("something"), []byte("else")))
	require.NoError(t, dbw.Set([]byte("k"), []byte("val")))
	require.NoError(t, dbw.Set([]byte("ke"), []byte("valu")))
	require.NoError(t, dbw.Set([]byte("kee"), []byte("valuu")))
	require.NoError(t, dbw.Commit())
}

// mockDBWithStuff returns a fresh in-memory connection seeded with fixtures.
func mockDBWithStuff(t *testing.T) db.Connection {
	dbm := memdb.NewDB()
	fillDBWithStuff(t, dbm.Writer())
	return dbm
}

// makePrefixReader wraps a reader on dbc restricted to the given prefix.
func makePrefixReader(t *testing.T, dbc db.Connection, pre []byte) db.Reader {
	view := dbc.Reader()
	require.NotNil(t, view)
	return pfx.NewReader(view, pre)
}
// TestPrefixDBSimple checks that only keys strictly under the prefix are
// visible, with the prefix stripped, and near-miss keys are hidden.
func TestPrefixDBSimple(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	dbtest.AssertValue(t, pdb, []byte("key"), nil)
	dbtest.AssertValue(t, pdb, []byte("key1"), nil)
	dbtest.AssertValue(t, pdb, []byte("1"), []byte("value1"))
	dbtest.AssertValue(t, pdb, []byte("key2"), nil)
	dbtest.AssertValue(t, pdb, []byte("2"), []byte("value2"))
	dbtest.AssertValue(t, pdb, []byte("key3"), nil)
	dbtest.AssertValue(t, pdb, []byte("3"), []byte("value3"))
	dbtest.AssertValue(t, pdb, []byte("something"), nil)
	dbtest.AssertValue(t, pdb, []byte("k"), nil)
	dbtest.AssertValue(t, pdb, []byte("ke"), nil)
	dbtest.AssertValue(t, pdb, []byte("kee"), nil)
}

// TestPrefixDBIterator1 iterates the full prefixed range ascending.
func TestPrefixDBIterator1(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	itr, err := pdb.Iterator(nil, nil)
	require.NoError(t, err)
	dbtest.AssertDomain(t, itr, nil, nil)
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
	dbtest.AssertNext(t, itr, false)
	dbtest.AssertInvalid(t, itr)
	itr.Close()
}

// TestPrefixDBReverseIterator1 iterates the full prefixed range descending.
func TestPrefixDBReverseIterator1(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	itr, err := pdb.ReverseIterator(nil, nil)
	require.NoError(t, err)
	dbtest.AssertDomain(t, itr, nil, nil)
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
	dbtest.AssertNext(t, itr, false)
	dbtest.AssertInvalid(t, itr)
	itr.Close()
}

// TestPrefixDBReverseIterator5 checks descending from an inclusive start "1".
func TestPrefixDBReverseIterator5(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	itr, err := pdb.ReverseIterator([]byte("1"), nil)
	require.NoError(t, err)
	dbtest.AssertDomain(t, itr, []byte("1"), nil)
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
	dbtest.AssertNext(t, itr, false)
	dbtest.AssertInvalid(t, itr)
	itr.Close()
}

// TestPrefixDBReverseIterator6 checks descending from an inclusive start "2".
func TestPrefixDBReverseIterator6(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	itr, err := pdb.ReverseIterator([]byte("2"), nil)
	require.NoError(t, err)
	dbtest.AssertDomain(t, itr, []byte("2"), nil)
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
	dbtest.AssertNext(t, itr, false)
	dbtest.AssertInvalid(t, itr)
	itr.Close()
}

// TestPrefixDBReverseIterator7 checks the exclusive end bound "2" descending.
func TestPrefixDBReverseIterator7(t *testing.T) {
	pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
	itr, err := pdb.ReverseIterator(nil, []byte("2"))
	require.NoError(t, err)
	dbtest.AssertDomain(t, itr, nil, []byte("2"))
	dbtest.AssertNext(t, itr, true)
	dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
	dbtest.AssertNext(t, itr, false)
	dbtest.AssertInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBViewVersion checks that a reader opened at a saved version sees
// the data as of that version, unaffected by later (discarded) writes.
func TestPrefixDBViewVersion(t *testing.T) {
	prefix := []byte("key")
	dbm := memdb.NewDB()
	fillDBWithStuff(t, dbm.Writer())
	id, err := dbm.SaveNextVersion()
	require.NoError(t, err)
	pdb := pfx.NewReadWriter(dbm.ReadWriter(), prefix)
	// Errors were previously ignored here; a failed write would have made the
	// assertions below pass vacuously.
	require.NoError(t, pdb.Set([]byte("1"), []byte("newvalue1")))
	require.NoError(t, pdb.Delete([]byte("2")))
	require.NoError(t, pdb.Set([]byte("4"), []byte("newvalue4")))
	require.NoError(t, pdb.Discard())
	dbview, err := dbm.ReaderAt(id)
	require.NotNil(t, dbview)
	require.NoError(t, err)
	view := pfx.NewReader(dbview, prefix)
	require.NotNil(t, view)
	defer view.Discard()
	dbtest.AssertValue(t, view, []byte("1"), []byte("value1"))
	dbtest.AssertValue(t, view, []byte("2"), []byte("value2"))
	dbtest.AssertValue(t, view, []byte("4"), nil)
}

View File

@ -1,69 +0,0 @@
//go:build rocksdb_build
package rocksdb
import (
"sync/atomic"
"github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/db/internal"
"github.com/cosmos/gorocksdb"
)
// rocksDBBatch is a write-only transaction backed by a rocksdb WriteBatch.
type rocksDBBatch struct {
	batch *gorocksdb.WriteBatch // nil once committed or discarded
	mgr   *dbManager
}

var _ db.Writer = (*rocksDBBatch)(nil)

// newRocksDBBatch allocates a fresh write batch bound to this manager.
// Caller is responsible for the open-writer accounting (see dbManager.Writer).
func (mgr *dbManager) newRocksDBBatch() *rocksDBBatch {
	return &rocksDBBatch{
		batch: gorocksdb.NewWriteBatch(),
		mgr:   mgr,
	}
}

// Set implements Writer. Buffers the write; nothing hits disk until Commit.
func (b *rocksDBBatch) Set(key, value []byte) error {
	if err := dbutil.ValidateKv(key, value); err != nil {
		return err
	}
	if b.batch == nil {
		return db.ErrTransactionClosed
	}
	b.batch.Put(key, value)
	return nil
}

// Delete implements Writer. Buffers the deletion until Commit.
func (b *rocksDBBatch) Delete(key []byte) error {
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	if b.batch == nil {
		return db.ErrTransactionClosed
	}
	b.batch.Delete(key)
	return nil
}

// Commit implements Writer: writes the batch to the current DB, then always
// discards the batch, folding any Discard error into the returned error.
func (b *rocksDBBatch) Commit() (err error) {
	if b.batch == nil {
		return db.ErrTransactionClosed
	}
	defer func() { err = dbutil.CombineErrors(err, b.Discard(), "Discard also failed") }()
	err = b.mgr.current.Write(b.mgr.opts.wo, b.batch)
	return
}

// Discard implements Writer. Idempotent; releases the native batch and
// decrements the manager's open-writer count exactly once.
func (b *rocksDBBatch) Discard() error {
	if b.batch != nil {
		defer atomic.AddInt32(&b.mgr.openWriters, -1)
		b.batch.Destroy()
		b.batch = nil
	}
	return nil
}

View File

@ -1,483 +0,0 @@
//go:build rocksdb_build
package rocksdb
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"sync/atomic"
"github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/db/internal"
"github.com/cosmos/gorocksdb"
)
var (
	// currentDBFileName is the working database directory under dir.
	currentDBFileName string = "current.db"
	// checkpointFileFormat names checkpoint dirs by zero-padded version.
	checkpointFileFormat string = "%020d.db"
)

// Compile-time interface satisfaction checks.
var (
	_ db.Connection = (*RocksDB)(nil)
	_ db.Reader     = (*dbTxn)(nil)
	_ db.Writer     = (*dbWriter)(nil)
	_ db.ReadWriter = (*dbWriter)(nil)
)

// RocksDB is a connection to a RocksDB key-value database.
type RocksDB = dbManager

// dbManager owns the current DB handle, its options, the version history,
// and a reference-counted cache of opened checkpoint connections.
type dbManager struct {
	current *Connection
	dir     string
	opts    dbOptions
	vmgr    *db.VersionManager
	mtx     sync.RWMutex
	// Track open DBWriters
	openWriters int32
	cpCache     checkpointCache
}

// Connection is the underlying rocksdb optimistic-transaction DB handle.
type Connection = gorocksdb.OptimisticTransactionDB

// checkpointCache reference-counts open checkpoint connections by version.
type checkpointCache struct {
	cache map[uint64]*cpCacheEntry
	mtx   sync.RWMutex
}

// cpCacheEntry pairs a checkpoint connection with its open reference count.
type cpCacheEntry struct {
	cxn       *Connection
	openCount uint
}

// dbTxn is a read transaction; version is 0 when reading the current DB, or
// the checkpoint version it was opened at.
type dbTxn struct {
	txn     *gorocksdb.Transaction
	mgr     *dbManager
	version uint64
}

// dbWriter is a read-write transaction over the current database.
type dbWriter struct{ dbTxn }

// dbOptions bundles the native option handles; released via destroy().
type dbOptions struct {
	dbo *gorocksdb.Options
	txo *gorocksdb.OptimisticTransactionOptions
	ro  *gorocksdb.ReadOptions
	wo  *gorocksdb.WriteOptions
}
// NewDB creates a new RocksDB key-value database with inside the given directory.
// If dir does not exist, it will be created. If the current DB directory is
// missing but checkpoints exist, the latest checkpoint is restored first.
func NewDB(dir string) (*dbManager, error) {
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return nil, err
	}
	// default rocksdb option, good enough for most cases, including heavy workloads.
	// 1GB table cache, 512MB write buffer(may use 50% more on heavy workloads).
	// compression: snappy as default, need to -lsnappy to enable.
	bbto := gorocksdb.NewDefaultBlockBasedTableOptions()
	bbto.SetBlockCache(gorocksdb.NewLRUCache(1 << 30))
	bbto.SetFilterPolicy(gorocksdb.NewBloomFilter(10))
	dbo := gorocksdb.NewDefaultOptions()
	dbo.SetBlockBasedTableFactory(bbto)
	dbo.SetCreateIfMissing(true)
	dbo.IncreaseParallelism(runtime.NumCPU())
	// 1.5GB maximum memory use for writebuffer.
	dbo.OptimizeLevelStyleCompaction(1<<30 + 512<<20)
	opts := dbOptions{
		dbo: dbo,
		txo: gorocksdb.NewDefaultOptimisticTransactionOptions(),
		ro:  gorocksdb.NewDefaultReadOptions(),
		wo:  gorocksdb.NewDefaultWriteOptions(),
	}
	mgr := &dbManager{
		dir:     dir,
		opts:    opts,
		cpCache: checkpointCache{cache: map[uint64]*cpCacheEntry{}},
	}
	err := os.MkdirAll(mgr.checkpointsDir(), 0o755)
	if err != nil {
		return nil, err
	}
	// Reconstruct version history from the checkpoint directory contents.
	if mgr.vmgr, err = readVersions(mgr.checkpointsDir()); err != nil {
		return nil, err
	}
	dbPath := filepath.Join(dir, currentDBFileName)
	// if the current db file is missing but there are checkpoints, restore it
	if mgr.vmgr.Count() > 0 {
		if _, err = os.Stat(dbPath); os.IsNotExist(err) {
			err = mgr.restoreFromCheckpoint(mgr.vmgr.Last(), dbPath)
			if err != nil {
				return nil, err
			}
		}
	}
	mgr.current, err = gorocksdb.OpenOptimisticTransactionDb(dbo, dbPath)
	if err != nil {
		return nil, err
	}
	return mgr, nil
}
// checkpointsDir returns the directory holding per-version checkpoints.
func (mgr *dbManager) checkpointsDir() string {
	return filepath.Join(mgr.dir, "checkpoints")
}

// readVersions reads the checkpoints directory and rebuilds the version set
// from the file names.
// NOTE(review): any file not matching checkpointFileFormat makes this fail —
// presumably the directory is owned exclusively by this package; confirm.
func readVersions(dir string) (*db.VersionManager, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var versions []uint64
	for _, f := range files {
		var version uint64
		if _, err := fmt.Sscanf(f.Name(), checkpointFileFormat, &version); err != nil {
			return nil, err
		}
		versions = append(versions, version)
	}
	return db.NewVersionManager(versions), nil
}

// checkpointPath returns the directory path for a version's checkpoint,
// or ErrVersionDoesNotExist if it is missing or not a directory.
func (mgr *dbManager) checkpointPath(version uint64) (string, error) {
	dbPath := filepath.Join(mgr.checkpointsDir(), fmt.Sprintf(checkpointFileFormat, version))
	if stat, err := os.Stat(dbPath); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			err = db.ErrVersionDoesNotExist
		}
		return "", err
	} else if !stat.IsDir() {
		return "", db.ErrVersionDoesNotExist
	}
	return dbPath, nil
}
// openCheckpoint opens (or reuses a cached handle to) the database for a
// checkpointed version, incrementing its reference count. Callers release
// the reference via checkpointCache.decrement.
func (mgr *dbManager) openCheckpoint(version uint64) (*Connection, error) {
	mgr.cpCache.mtx.Lock()
	defer mgr.cpCache.mtx.Unlock()
	if cp, has := mgr.cpCache.cache[version]; has {
		cp.openCount += 1
		return cp.cxn, nil
	}
	dbPath, err := mgr.checkpointPath(version)
	if err != nil {
		return nil, err
	}
	// Renamed from `db`, which shadowed the imported db package.
	cxn, err := gorocksdb.OpenOptimisticTransactionDb(mgr.opts.dbo, dbPath)
	if err != nil {
		return nil, err
	}
	mgr.cpCache.cache[version] = &cpCacheEntry{cxn: cxn, openCount: 1}
	return cxn, nil
}
// Reader returns a read transaction over the current database state.
func (mgr *dbManager) Reader() db.Reader {
	mgr.mtx.RLock()
	defer mgr.mtx.RUnlock()
	return &dbTxn{
		// Note: oldTransaction could be passed here as a small optimization to
		// avoid allocating a new object.
		txn: mgr.current.TransactionBegin(mgr.opts.wo, mgr.opts.txo, nil),
		mgr: mgr,
	}
}

// ReaderAt returns a read transaction pinned to a saved version's checkpoint.
// The checkpoint reference is released when the txn is discarded.
func (mgr *dbManager) ReaderAt(version uint64) (db.Reader, error) {
	mgr.mtx.RLock()
	defer mgr.mtx.RUnlock()
	d, err := mgr.openCheckpoint(version)
	if err != nil {
		return nil, err
	}
	return &dbTxn{
		txn:     d.TransactionBegin(mgr.opts.wo, mgr.opts.txo, nil),
		mgr:     mgr,
		version: version,
	}, nil
}

// ReadWriter returns a read-write transaction; counted as an open writer
// until committed or discarded.
func (mgr *dbManager) ReadWriter() db.ReadWriter {
	mgr.mtx.RLock()
	defer mgr.mtx.RUnlock()
	atomic.AddInt32(&mgr.openWriters, 1)
	return &dbWriter{dbTxn{
		txn: mgr.current.TransactionBegin(mgr.opts.wo, mgr.opts.txo, nil),
		mgr: mgr,
	}}
}

// Writer returns a write-only batch; counted as an open writer until
// committed or discarded.
func (mgr *dbManager) Writer() db.Writer {
	mgr.mtx.RLock()
	defer mgr.mtx.RUnlock()
	atomic.AddInt32(&mgr.openWriters, 1)
	return mgr.newRocksDBBatch()
}

// Versions returns the current version set.
func (mgr *dbManager) Versions() (db.VersionSet, error) {
	mgr.mtx.RLock()
	defer mgr.mtx.RUnlock()
	return mgr.vmgr, nil
}

// SaveNextVersion implements Connection.
func (mgr *dbManager) SaveNextVersion() (uint64, error) {
	return mgr.save(0)
}

// SaveVersion implements Connection.
func (mgr *dbManager) SaveVersion(target uint64) error {
	if target == 0 {
		return db.ErrInvalidVersion
	}
	_, err := mgr.save(target)
	return err
}
// save creates an on-disk checkpoint of the current DB as a new version.
// If target is 0 the next sequential version ID is assigned. Fails with
// ErrOpenTransactions while any writer is open.
func (mgr *dbManager) save(target uint64) (uint64, error) {
	mgr.mtx.Lock()
	defer mgr.mtx.Unlock()
	if mgr.openWriters > 0 {
		return 0, db.ErrOpenTransactions
	}
	// Mutate a copy so a failed save leaves the version history intact.
	newVmgr := mgr.vmgr.Copy()
	target, err := newVmgr.Save(target)
	if err != nil {
		return 0, err
	}
	cp, err := mgr.current.NewCheckpoint()
	if err != nil {
		return 0, err
	}
	// Destroy the checkpoint object on every path; it was previously leaked
	// when CreateCheckpoint failed.
	defer cp.Destroy()
	dir := filepath.Join(mgr.checkpointsDir(), fmt.Sprintf(checkpointFileFormat, target))
	if err := cp.CreateCheckpoint(dir, 0); err != nil {
		return 0, err
	}
	mgr.vmgr = newVmgr
	return target, nil
}
// DeleteVersion removes a saved version's checkpoint from disk and from the
// version history. Fails if the checkpoint is currently open.
// NOTE(review): the cpCache.has check happens before mgr.mtx is taken, so a
// reader could open the checkpoint in between — confirm callers tolerate this.
func (mgr *dbManager) DeleteVersion(ver uint64) error {
	if mgr.cpCache.has(ver) {
		return db.ErrOpenTransactions
	}
	mgr.mtx.Lock()
	defer mgr.mtx.Unlock()
	dbPath, err := mgr.checkpointPath(ver)
	if err != nil {
		return err
	}
	mgr.vmgr = mgr.vmgr.Copy()
	mgr.vmgr.Delete(ver)
	return os.RemoveAll(dbPath)
}
// Revert discards all changes since the last saved version by replacing the
// current database with a fresh copy of the latest checkpoint (or an empty
// database if none exists). Fails while any writer is open.
func (mgr *dbManager) Revert() (err error) {
	// Write lock required: this method closes and replaces mgr.current,
	// which would race with concurrent Reader/Writer calls under RLock.
	mgr.mtx.Lock()
	defer mgr.mtx.Unlock()
	if mgr.openWriters > 0 {
		return db.ErrOpenTransactions
	}
	// Close current connection and replace it with a checkpoint (created from the last checkpoint)
	mgr.current.Close()
	dbPath := filepath.Join(mgr.dir, currentDBFileName)
	err = os.RemoveAll(dbPath)
	if err != nil {
		return
	}
	if last := mgr.vmgr.Last(); last != 0 {
		err = mgr.restoreFromCheckpoint(last, dbPath)
		if err != nil {
			return
		}
	}
	mgr.current, err = gorocksdb.OpenOptimisticTransactionDb(mgr.opts.dbo, dbPath)
	return
}
// restoreFromCheckpoint copies the checkpoint for version into path by
// creating a checkpoint of the checkpoint connection itself.
func (mgr *dbManager) restoreFromCheckpoint(version uint64, path string) error {
	cxn, err := mgr.openCheckpoint(version)
	if err != nil {
		return err
	}
	// Release the cache reference taken by openCheckpoint.
	defer mgr.cpCache.decrement(version)
	cp, err := cxn.NewCheckpoint()
	if err != nil {
		return err
	}
	// Destroy on every path; it was previously leaked when CreateCheckpoint failed.
	defer cp.Destroy()
	return cp.CreateCheckpoint(path, 0)
}
// Close implements Connection. Closes the current DB and frees option handles.
// NOTE(review): cached checkpoint connections are not closed here — presumably
// callers discard all readers first; confirm.
func (mgr *dbManager) Close() error {
	mgr.current.Close()
	mgr.opts.destroy()
	return nil
}

// Stats implements Connection. Reports rocksdb's internal statistics.
func (mgr *dbManager) Stats() map[string]string {
	keys := []string{"rocksdb.stats"}
	stats := make(map[string]string, len(keys))
	for _, key := range keys {
		stats[key] = mgr.current.GetProperty(key)
	}
	return stats
}

// Get implements Reader. Returns (nil, nil) when the key is absent.
func (tx *dbTxn) Get(key []byte) ([]byte, error) {
	if tx.txn == nil {
		return nil, db.ErrTransactionClosed
	}
	if len(key) == 0 {
		return nil, db.ErrKeyEmpty
	}
	res, err := tx.txn.Get(tx.mgr.opts.ro, key)
	if err != nil {
		return nil, err
	}
	return moveSliceToBytes(res), nil
}

// Get implements Reader. The writer variant uses GetForUpdate so the read
// participates in the transaction's conflict detection.
func (tx *dbWriter) Get(key []byte) ([]byte, error) {
	if tx.txn == nil {
		return nil, db.ErrTransactionClosed
	}
	if len(key) == 0 {
		return nil, db.ErrKeyEmpty
	}
	res, err := tx.txn.GetForUpdate(tx.mgr.opts.ro, key)
	if err != nil {
		return nil, err
	}
	return moveSliceToBytes(res), nil
}

// Has implements DBReader. Defined in terms of Get: present iff non-nil.
func (tx *dbTxn) Has(key []byte) (bool, error) {
	bytes, err := tx.Get(key)
	if err != nil {
		return false, err
	}
	return bytes != nil, nil
}
// Set implements DBWriter.
func (tx *dbWriter) Set(key []byte, value []byte) error {
	if tx.txn == nil {
		return db.ErrTransactionClosed
	}
	if err := dbutil.ValidateKv(key, value); err != nil {
		return err
	}
	return tx.txn.Put(key, value)
}

// Delete implements DBWriter.
func (tx *dbWriter) Delete(key []byte) error {
	if tx.txn == nil {
		return db.ErrTransactionClosed
	}
	if len(key) == 0 {
		return db.ErrKeyEmpty
	}
	return tx.txn.Delete(key)
}

// Commit implements DBWriter: commits the transaction, then always discards
// it, folding any Discard error into the returned error.
func (tx *dbWriter) Commit() (err error) {
	if tx.txn == nil {
		return db.ErrTransactionClosed
	}
	defer func() { err = dbutil.CombineErrors(err, tx.Discard(), "Discard also failed") }()
	err = tx.txn.Commit()
	return
}

// Discard implements DBReader: destroys the native txn and, for checkpoint
// readers (version != 0), releases the checkpoint cache reference.
func (tx *dbTxn) Discard() error {
	if tx.txn == nil {
		return nil // Discard() is idempotent
	}
	defer func() { tx.txn.Destroy(); tx.txn = nil }()
	if tx.version == 0 {
		return nil
	}
	if !tx.mgr.cpCache.decrement(tx.version) {
		return fmt.Errorf("transaction has no corresponding checkpoint cache entry: %v", tx.version)
	}
	return nil
}

// Discard implements DBWriter: also decrements the open-writer count exactly
// once (only while the transaction is still open).
func (tx *dbWriter) Discard() error {
	if tx.txn != nil {
		defer atomic.AddInt32(&tx.mgr.openWriters, -1)
	}
	return tx.dbTxn.Discard()
}
// Iterator implements DBReader. Iterates [start, end) ascending; nil means an
// open endpoint, empty non-nil endpoints are invalid.
func (tx *dbTxn) Iterator(start, end []byte) (db.Iterator, error) {
	if tx.txn == nil {
		return nil, db.ErrTransactionClosed
	}
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	itr := tx.txn.NewIterator(tx.mgr.opts.ro)
	return newRocksDBIterator(itr, start, end, false), nil
}

// ReverseIterator implements DBReader. As Iterator, but descending.
func (tx *dbTxn) ReverseIterator(start, end []byte) (db.Iterator, error) {
	if tx.txn == nil {
		return nil, db.ErrTransactionClosed
	}
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, db.ErrKeyEmpty
	}
	itr := tx.txn.NewIterator(tx.mgr.opts.ro)
	return newRocksDBIterator(itr, start, end, true), nil
}

// destroy releases all native option handles.
func (o dbOptions) destroy() {
	o.ro.Destroy()
	o.wo.Destroy()
	o.txo.Destroy()
	o.dbo.Destroy()
}

// has reports whether a checkpoint connection for ver is currently open.
func (cpc *checkpointCache) has(ver uint64) bool {
	cpc.mtx.RLock()
	defer cpc.mtx.RUnlock()
	_, has := cpc.cache[ver]
	return has
}

// decrement releases one reference to ver's checkpoint, closing and evicting
// it at zero. Returns false if no cache entry exists for ver.
func (cpc *checkpointCache) decrement(ver uint64) bool {
	cpc.mtx.Lock()
	defer cpc.mtx.Unlock()
	cp, has := cpc.cache[ver]
	if !has {
		return false
	}
	cp.openCount -= 1
	if cp.openCount == 0 {
		cp.cxn.Close()
		delete(cpc.cache, ver)
	}
	return true
}

View File

@ -1,77 +0,0 @@
//go:build rocksdb_build
package rocksdb
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/dbtest"
)
func load(t *testing.T, dir string) db.Connection {
d, err := NewDB(dir)
require.NoError(t, err)
return d
}
// TestGetSetHasDelete runs the shared CRUD conformance suite against the
// rocksdb backend.
func TestGetSetHasDelete(t *testing.T) {
	dbtest.DoTestGetSetHasDelete(t, load)
}

// TestIterators runs the shared iterator conformance suite.
func TestIterators(t *testing.T) {
	dbtest.DoTestIterators(t, load)
}

// TestTransactions runs the shared transaction conformance suite.
// NOTE(review): the boolean flag's meaning is defined in dbtest — confirm
// (presumably whether the backend supports multiple concurrent writers).
func TestTransactions(t *testing.T) {
	dbtest.DoTestTransactions(t, load, true)
}

// TestVersioning runs the shared version save/load conformance suite.
func TestVersioning(t *testing.T) {
	dbtest.DoTestVersioning(t, load)
}

// TestRevert runs the shared revert suite in both of its modes (flag
// semantics defined in dbtest).
func TestRevert(t *testing.T) {
	dbtest.DoTestRevert(t, load, false)
	dbtest.DoTestRevert(t, load, true)
}

// TestReloadDB runs the shared close-and-reopen conformance suite.
func TestReloadDB(t *testing.T) {
	dbtest.DoTestReloadDB(t, load)
}
// TestRevertRecovery checks that the DB can be reloaded after a failed
// Revert: a revert interrupted by an I/O error must leave the on-disk state
// recoverable, with the last saved version intact and unsaved writes absent.
func TestRevertRecovery(t *testing.T) {
	dir := t.TempDir()
	d, err := NewDB(dir)
	require.NoError(t, err)

	// Write key {1} and save it as a version, so it must survive the revert.
	txn := d.Writer()
	require.NoError(t, txn.Set([]byte{1}, []byte{1}))
	require.NoError(t, txn.Commit())
	_, err = d.SaveNextVersion()
	require.NoError(t, err)

	// Write key {2} but do NOT save a version: the revert should discard it.
	txn = d.Writer()
	require.NoError(t, txn.Set([]byte{2}, []byte{2}))
	require.NoError(t, txn.Commit())

	// move checkpoints dir temporarily to trigger an error
	hideDir := filepath.Join(dir, "hide_checkpoints")
	require.NoError(t, os.Rename(d.checkpointsDir(), hideDir))
	require.Error(t, d.Revert())
	require.NoError(t, os.Rename(hideDir, d.checkpointsDir()))

	// Reopen and verify: saved key present, unsaved key gone.
	d, err = NewDB(dir)
	require.NoError(t, err)
	view := d.Reader()
	val, err := view.Get([]byte{1})
	require.NoError(t, err)
	require.Equal(t, []byte{1}, val)
	val, err = view.Get([]byte{2})
	require.NoError(t, err)
	require.Nil(t, val)
	view.Discard()
}

View File

@ -1,149 +0,0 @@
//go:build rocksdb_build
package rocksdb
import (
"bytes"
"github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/gorocksdb"
)
// rocksDBIterator adapts a gorocksdb.Iterator to the db.Iterator interface,
// supporting forward and reverse iteration over a [start, end) key domain.
type rocksDBIterator struct {
	// source is the underlying RocksDB iterator; it owns C-allocated memory
	// and must be closed via Close.
	source *gorocksdb.Iterator
	// start (inclusive) and end (exclusive) bound the iteration domain;
	// nil means unbounded on that side.
	start, end []byte
	// isReverse selects descending iteration order.
	isReverse bool
	// isInvalid latches true once the iterator leaves its domain or the
	// source is exhausted; it never becomes valid again.
	isInvalid bool
	// Whether iterator has been advanced to the first element (is fully initialized)
	primed bool
}

var _ db.Iterator = (*rocksDBIterator)(nil)
// newRocksDBIterator positions a raw gorocksdb iterator at the first element
// of the [start, end) domain for the given direction and wraps it. The result
// is not yet primed: per the db.Iterator contract, Next must be called before
// Key/Value are usable.
func newRocksDBIterator(source *gorocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator {
	if isReverse {
		if end == nil {
			source.SeekToLast()
		} else {
			// Seek lands on the first key >= end. Since end is exclusive,
			// step back one key — unless Seek overshot past the last key,
			// in which case the last key is the reverse starting point.
			source.Seek(end)
			if source.Valid() {
				eoakey := moveSliceToBytes(source.Key()) // end or after key
				if bytes.Compare(end, eoakey) <= 0 {
					source.Prev()
				}
			} else {
				source.SeekToLast()
			}
		}
	} else {
		if start == nil {
			source.SeekToFirst()
		} else {
			// Seek positions on the first key >= start (start is inclusive).
			source.Seek(start)
		}
	}
	return &rocksDBIterator{
		source:    source,
		start:     start,
		end:       end,
		isReverse: isReverse,
		isInvalid: false,
		primed:    false,
	}
}
// Domain implements Iterator.
// Reports the inclusive start and exclusive end bounds this iterator was
// constructed with (nil means unbounded).
func (itr *rocksDBIterator) Domain() (start, end []byte) {
	start, end = itr.start, itr.end
	return start, end
}
// Valid implements Iterator.
// The iterator is valid only after the first Next call (primed), while the
// source is positioned on a key, and while that key remains inside the
// [start, end) domain for the iteration direction. Invalidity latches: once
// false for a positional reason, it stays false forever.
func (itr *rocksDBIterator) Valid() bool {
	// Not yet advanced by Next: invalid by contract.
	if !itr.primed {
		return false
	}
	// Once invalid, always invalid.
	if itr.isInvalid {
		return false
	}
	// Source exhausted: latch invalid.
	if !itr.source.Valid() {
		itr.isInvalid = true
		return false
	}
	var (
		start = itr.start
		end   = itr.end
		key   = moveSliceToBytes(itr.source.Key())
	)
	// If key is end or past it, invalid.
	// Forward: key >= end (end exclusive). Reverse: key < start (start inclusive).
	if itr.isReverse {
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.isInvalid = true
			return false
		}
	} else {
		if end != nil && bytes.Compare(key, end) >= 0 {
			itr.isInvalid = true
			return false
		}
	}
	return true
}
// Key implements Iterator.
// Panics if the iterator is invalid. The returned slice is a Go copy detached
// from RocksDB-owned memory (see moveSliceToBytes).
func (itr *rocksDBIterator) Key() []byte {
	itr.assertIsValid()
	return moveSliceToBytes(itr.source.Key())
}

// Value implements Iterator.
// Panics if the iterator is invalid; returns a detached copy of the value.
func (itr *rocksDBIterator) Value() []byte {
	itr.assertIsValid()
	return moveSliceToBytes(itr.source.Value())
}
// Next implements Iterator.
// The first call primes the iterator in place (the constructor already
// positioned the source on the first element); subsequent calls step the
// source in the configured direction. Reports whether the new position is
// valid.
func (itr *rocksDBIterator) Next() bool {
	switch {
	case !itr.primed:
		itr.primed = true
	case itr.isReverse:
		itr.source.Prev()
	default:
		itr.source.Next()
	}
	return itr.Valid()
}
// Error implements Iterator, surfacing any error recorded by the underlying
// RocksDB iterator.
func (itr *rocksDBIterator) Error() error {
	return itr.source.Err()
}

// Close implements Iterator, releasing the underlying RocksDB iterator and
// its native memory. Always returns nil.
func (itr *rocksDBIterator) Close() error {
	itr.source.Close()
	return nil
}
// assertIsValid panics when the iterator is not positioned on a valid
// element; Key and Value use it to enforce the db.Iterator contract.
func (itr *rocksDBIterator) assertIsValid() {
	if !itr.Valid() {
		panic("iterator is invalid")
	}
}
// moveSliceToBytes copies the contents of a RocksDB-owned *Slice into a fresh
// Go []byte and frees the slice, returning nil when the slice does not exist.
// This function can be applied on *Slice returned from Key() and Value()
// of an Iterator, because they are marked as freed.
func moveSliceToBytes(s *gorocksdb.Slice) []byte {
	defer s.Free()
	if !s.Exists() {
		return nil
	}
	return append([]byte(nil), s.Data()...)
}

View File

@ -1,204 +0,0 @@
package db
import "errors"
// Sentinel errors shared by all db backends; callers should compare with
// errors.Is.
var (
	// ErrTransactionClosed is returned when a closed or written transaction is used.
	ErrTransactionClosed = errors.New("transaction has been written or closed")

	// ErrKeyEmpty is returned when attempting to use an empty or nil key.
	ErrKeyEmpty = errors.New("key cannot be empty")

	// ErrValueNil is returned when attempting to set a nil value.
	ErrValueNil = errors.New("value cannot be nil")

	// ErrVersionDoesNotExist is returned when a DB version does not exist.
	ErrVersionDoesNotExist = errors.New("version does not exist")

	// ErrOpenTransactions is returned when open transactions exist which must
	// be discarded/committed before an operation can complete.
	ErrOpenTransactions = errors.New("open transactions exist")

	// ErrReadOnly is returned when a write operation is attempted on a read-only transaction.
	ErrReadOnly = errors.New("cannot modify read-only transaction")

	// ErrInvalidVersion is returned when an operation attempts to use an invalid version ID.
	ErrInvalidVersion = errors.New("invalid version")
)
// Connection represents a connection to a versioned database.
// Records are accessed via transaction objects, and must be safe for concurrent creation
// and read and write access.
// Past versions are only accessible read-only.
type Connection interface {
	// Reader opens a read-only transaction at the current working version.
	Reader() Reader

	// ReaderAt opens a read-only transaction at a specified version.
	// Returns ErrVersionDoesNotExist for invalid versions.
	ReaderAt(uint64) (Reader, error)

	// ReadWriter opens a read-write transaction at the current version.
	ReadWriter() ReadWriter

	// Writer opens a write-only transaction at the current version.
	Writer() Writer

	// Versions returns all saved versions as an immutable set which is safe for concurrent access.
	Versions() (VersionSet, error)

	// SaveNextVersion saves the current contents of the database and returns the next version ID,
	// which will be `Versions().Last()+1`.
	// Returns an error if any open Writer transactions exist.
	// TODO: rename to something more descriptive?
	SaveNextVersion() (uint64, error)

	// SaveVersion attempts to save database at a specific version ID, which must be greater than or
	// equal to what would be returned by `SaveNextVersion`.
	// Returns an error if any open Writer transactions exist.
	SaveVersion(uint64) error

	// DeleteVersion deletes a saved version. Returns ErrVersionDoesNotExist for invalid versions.
	DeleteVersion(uint64) error

	// Revert reverts the DB state to the last saved version; if none exist, this clears the DB.
	// Returns an error if any open Writer transactions exist.
	Revert() error

	// Close closes the database connection.
	Close() error
}
// Reader is a read-only transaction interface. It is safe for concurrent access.
// Callers must call Discard when done with the transaction.
//
// Keys cannot be nil or empty, while values cannot be nil. Keys and values should be considered
// read-only, both when returned and when given, and must be copied before they are modified.
type Reader interface {
	// Get fetches the value of the given key, or nil if it does not exist.
	// CONTRACT: key, value readonly []byte
	Get([]byte) ([]byte, error)

	// Has checks if a key exists.
	// CONTRACT: key, value readonly []byte
	Has(key []byte) (bool, error)

	// Iterator returns an iterator over a domain of keys, in ascending order. The caller must call
	// Close when done. End is exclusive, and start must be less than end. A nil start iterates
	// from the first key, and a nil end iterates to the last key (inclusive). Empty keys are not
	// valid.
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	Iterator(start, end []byte) (Iterator, error)

	// ReverseIterator returns an iterator over a domain of keys, in descending order. The caller
	// must call Close when done. End is exclusive, and start must be less than end. A nil end
	// iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive).
	// Empty keys are not valid.
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	// TODO: replace with an extra argument to Iterator()?
	ReverseIterator(start, end []byte) (Iterator, error)

	// Discard discards the transaction, invalidating any future operations on it.
	Discard() error
}
// Writer is a write-only transaction interface.
// It is safe for concurrent writes, following an optimistic (OCC) strategy, detecting any write
// conflicts and returning an error on commit, rather than locking the DB.
// Callers must call Commit or Discard when done with the transaction.
//
// This can be used to wrap a write-optimized batch object if provided by the backend implementation.
type Writer interface {
	// Set sets the value for the given key, replacing it if it already exists.
	// CONTRACT: key, value readonly []byte
	Set([]byte, []byte) error

	// Delete deletes the key, or does nothing if the key does not exist.
	// CONTRACT: key readonly []byte
	Delete([]byte) error

	// Commit flushes pending writes and discards the transaction.
	Commit() error

	// Discard discards the transaction, invalidating any future operations on it.
	Discard() error
}
// ReadWriter is a transaction interface that allows both reading and writing.
type ReadWriter interface {
	Reader
	Writer
}
// Iterator represents an iterator over a domain of keys. Callers must call Close when done.
// No writes can happen to a domain while there exists an iterator over it, some backends may take
// out database locks to ensure this will not happen.
//
// Callers must make sure the iterator is valid before calling any methods on it, otherwise
// these methods will panic. This is in part caused by most backend databases using this convention.
// Note that the iterator is invalid on construction: Next() must be called to initialize it to its
// starting position.
//
// As with Reader, keys and values should be considered read-only, and must be copied before they are
// modified.
//
// Typical usage:
//
//	var itr Iterator = ...
//	defer itr.Close()
//
//	for itr.Next() {
//		k, v := itr.Key(); itr.Value()
//		...
//	}
//	if err := itr.Error(); err != nil {
//		...
//	}
type Iterator interface {
	// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
	// CONTRACT: start, end readonly []byte
	Domain() (start []byte, end []byte)

	// Next moves the iterator to the next key in the database, as defined by order of iteration;
	// returns whether the iterator is valid.
	// Once this function returns false, the iterator remains invalid forever.
	Next() bool

	// Key returns the key at the current position. Panics if the iterator is invalid.
	// CONTRACT: key readonly []byte
	Key() (key []byte)

	// Value returns the value at the current position. Panics if the iterator is invalid.
	// CONTRACT: value readonly []byte
	Value() (value []byte)

	// Error returns the last error encountered by the iterator, if any.
	Error() error

	// Close closes the iterator, releasing any allocated resources.
	Close() error
}
// VersionSet specifies a set of existing versions.
type VersionSet interface {
	// Last returns the most recent saved version, or 0 if none.
	Last() uint64

	// Count returns the number of saved versions.
	Count() int

	// Iterator returns an iterator over all saved versions.
	Iterator() VersionIterator

	// Equal returns true iff this set is identical to another.
	Equal(VersionSet) bool

	// Exists returns true if a saved version exists.
	Exists(uint64) bool
}
// VersionIterator iterates over the elements of a VersionSet; iteration order
// is unspecified.
type VersionIterator interface {
	// Next advances the iterator to the next element.
	// Returns whether the iterator is valid; once invalid, it remains invalid forever.
	Next() bool

	// Value returns the version ID at the current position.
	Value() uint64
}

View File

@ -1,132 +0,0 @@
package db
import (
"fmt"
)
// VersionManager encapsulates the current valid versions of a DB and computes
// the next version.
type VersionManager struct {
	// versions is the set of saved version IDs.
	versions map[uint64]struct{}
	// initial and last cache the smallest and largest saved IDs; both are 0
	// when the set is empty (version IDs are assumed to be nonzero).
	initial, last uint64
}

var _ VersionSet = (*VersionManager)(nil)
// NewVersionManager creates a VersionManager from a slice of version ids.
// The smallest and largest ids are cached as initial and last; ids are
// assumed to be nonzero (0 means "no version").
func NewVersionManager(versions []uint64) *VersionManager {
	// Pre-size the map: the final element count is bounded by len(versions),
	// avoiding incremental rehashing during the loop.
	vmap := make(map[uint64]struct{}, len(versions))
	var init, last uint64
	for _, ver := range versions {
		vmap[ver] = struct{}{}
		if init == 0 || ver < init {
			init = ver
		}
		if ver > last {
			last = ver
		}
	}
	return &VersionManager{versions: vmap, initial: init, last: last}
}
// Exists implements VersionSet.
func (vm *VersionManager) Exists(version uint64) bool {
	_, has := vm.versions[version]
	return has
}

// Last implements VersionSet. Returns 0 when no versions are saved.
func (vm *VersionManager) Last() uint64 {
	return vm.last
}

// Initial returns the oldest saved version, or 0 when none are saved.
func (vm *VersionManager) Initial() uint64 {
	return vm.initial
}
// Save records target as a saved version and returns it. A target of 0 means
// "use the next sequential version" (Last()+1); an explicit target must be at
// least that value and must not already exist.
func (vm *VersionManager) Save(target uint64) (uint64, error) {
	next := vm.Last() + 1
	switch {
	case target == 0:
		target = next
	case target < next:
		return 0, fmt.Errorf(
			"target version cannot be less than next sequential version (%v < %v)", target, next)
	}
	if _, exists := vm.versions[target]; exists {
		return 0, fmt.Errorf("version exists: %v", target)
	}
	vm.versions[target] = struct{}{}
	vm.last = target
	// The first saved version is also the initial one.
	if len(vm.versions) == 1 {
		vm.initial = target
	}
	return target, nil
}
// findLimit scans the set m and returns the extreme element selected by cmp
// (e.g. the maximum when cmp is ">"), starting the comparison from init.
// When m is empty, init is returned unchanged.
func findLimit(m map[uint64]struct{}, cmp func(uint64, uint64) bool, init uint64) uint64 {
	best := init
	for ver := range m {
		if cmp(ver, best) {
			best = ver
		}
	}
	return best
}
// Delete removes target from the version set, recomputing the cached
// last/initial bounds when the deleted id was one of them.
func (vm *VersionManager) Delete(target uint64) {
	delete(vm.versions, target)
	if target == vm.last {
		// New maximum (0 when the set is now empty).
		vm.last = findLimit(vm.versions, func(x, best uint64) bool { return x > best }, 0)
	}
	if target == vm.initial {
		// New minimum, searched downward from the (possibly updated) last.
		vm.initial = findLimit(vm.versions, func(x, best uint64) bool { return x < best }, vm.last)
	}
}
// vmIterator implements VersionIterator by receiving version IDs from a
// channel until it is closed.
type vmIterator struct {
	ch <-chan uint64
	// open is false once the channel has been closed and drained.
	open bool
	// buf holds the value most recently received by Next.
	buf uint64
}

// Next receives the next version into buf; reports whether the channel was
// still open, i.e. whether the iterator remains valid.
func (vi *vmIterator) Next() bool {
	vi.buf, vi.open = <-vi.ch
	return vi.open
}

// Value returns the version ID most recently received by Next.
func (vi *vmIterator) Value() uint64 { return vi.buf }
// Iterator implements VersionSet.
// The versions are snapshotted into a buffered, pre-closed channel up front,
// so no producer goroutine is needed. The previous implementation sent over
// an unbuffered channel from a goroutine, which leaked that goroutine
// whenever a caller abandoned iteration before draining it. Iteration order
// remains unspecified (Go map order).
func (vm *VersionManager) Iterator() VersionIterator {
	ch := make(chan uint64, len(vm.versions))
	for ver := range vm.versions {
		ch <- ver
	}
	close(ch)
	return &vmIterator{ch: ch}
}
// Count implements VersionSet.
func (vm *VersionManager) Count() int { return len(vm.versions) }

// Equal implements VersionSet.
// Two sets are equal when they have the same cardinality and every version
// in that is also present in this set.
func (vm *VersionManager) Equal(that VersionSet) bool {
	if vm.Count() != that.Count() {
		return false
	}
	for it := that.Iterator(); it.Next(); {
		if !vm.Exists(it.Value()) {
			return false
		}
	}
	return true
}
// Copy returns an independent deep copy of the version set; mutations of the
// copy do not affect the original.
func (vm *VersionManager) Copy() *VersionManager {
	// Pre-size the map to the known final count to avoid rehashing.
	vmap := make(map[uint64]struct{}, len(vm.versions))
	for ver := range vm.versions {
		vmap[ver] = struct{}{}
	}
	return &VersionManager{versions: vmap, initial: vm.initial, last: vm.last}
}

View File

@ -1,59 +0,0 @@
package db_test
import (
"sort"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db"
)
// Test that VersionManager satisfies the behavior of VersionSet
// Test that VersionManager satisfies the behavior of VersionSet
func TestVersionManager(t *testing.T) {
	// Empty manager: zero last, zero count, equal to itself, nothing exists.
	vm := db.NewVersionManager(nil)
	require.Equal(t, uint64(0), vm.Last())
	require.Equal(t, 0, vm.Count())
	require.True(t, vm.Equal(vm))
	require.False(t, vm.Exists(0))

	// Save(0) assigns sequential ids starting at 1.
	id1, err := vm.Save(0)
	require.NoError(t, err)
	require.Equal(t, uint64(1), id1)
	require.True(t, vm.Exists(id1))
	id2, err := vm.Save(0)
	require.NoError(t, err)
	require.True(t, vm.Exists(id2))
	id3, err := vm.Save(0)
	require.NoError(t, err)
	require.True(t, vm.Exists(id3))

	_, err = vm.Save(id1) // can't save existing id
	require.Error(t, err)

	id4, err := vm.Save(0)
	require.NoError(t, err)
	require.True(t, vm.Exists(id4))

	// Deleting the newest and oldest ids updates Last/Initial accordingly.
	vm.Delete(id4)
	require.False(t, vm.Exists(id4))
	vm.Delete(id1)
	require.False(t, vm.Exists(id1))
	require.Equal(t, id2, vm.Initial())
	require.Equal(t, id3, vm.Last())

	// Iterator yields exactly the remaining ids (order unspecified, so sort).
	var all []uint64
	for it := vm.Iterator(); it.Next(); {
		all = append(all, it.Value())
	}
	sort.Slice(all, func(i, j int) bool { return all[i] < all[j] })
	require.Equal(t, []uint64{id2, id3}, all)

	vmc := vm.Copy()
	id5, err := vmc.Save(0)
	require.NoError(t, err)
	require.False(t, vm.Exists(id5)) // true copy is made

	// A manager rebuilt from the same ids compares equal.
	vm2 := db.NewVersionManager([]uint64{id2, id3})
	require.True(t, vm.Equal(vm2))
}