refactor: remove storev2alpha1 (#13371)

Jacob Gadikian 2022-09-24 04:53:58 +07:00 committed by GitHub
parent ddf1dd4915
commit b10097fdcf
28 changed files with 1 addition and 4249 deletions


@@ -113,8 +113,8 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking Changes
* !(storev2alpha1) [#13370](https://github.com/cosmos/cosmos-sdk/pull/13370) remove storev2alpha1
* (tx) [#12659](https://github.com/cosmos/cosmos-sdk/pull/12659) Remove broadcast mode `block`.
* (db) [#13370](https://github.com/cosmos/cosmos-sdk/pull/13370) remove storev2alpha1, see also https://github.com/cosmos/cosmos-sdk/pull/13371
* (context) [#13063](https://github.com/cosmos/cosmos-sdk/pull/13063) Update `Context#CacheContext` to automatically emit all events on the parent context's `EventManager`.
* (x/bank) [#12706](https://github.com/cosmos/cosmos-sdk/pull/12706) Removed the `testutil` package from the `x/bank/client` package.
* (simapp) [#12747](https://github.com/cosmos/cosmos-sdk/pull/12747) Remove `simapp.MakeTestEncodingConfig`. Please use `moduletestutil.MakeTestEncodingConfig` (`types/module/testutil`) in tests instead.


@@ -8,7 +8,6 @@ import (
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
"github.com/cosmos/cosmos-sdk/store/transient"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
)
var k, v = []byte("hello"), []byte("world")
@@ -32,5 +31,4 @@ func TestTransientStore(t *testing.T) {
emptyCommitID := tstore.LastCommitID()
require.Equal(t, emptyCommitID.Version, int64(0))
require.True(t, bytes.Equal(emptyCommitID.Hash, nil))
require.Equal(t, types.StoreTypeTransient, tstore.GetStoreType())
}


@@ -1,92 +0,0 @@
package dbadapter
import (
"io"
dbm "github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
var _ types.KVStore = Store{}
// Store is a wrapper type for dbm.ReadWriter implementing KVStore
type Store struct {
DB dbm.ReadWriter
}
// Get wraps the underlying DB's Get method, panicking on error.
func (dsa Store) Get(key []byte) []byte {
v, err := dsa.DB.Get(key)
if err != nil {
panic(err)
}
return v
}
// Has wraps the underlying DB's Has method, panicking on error.
func (dsa Store) Has(key []byte) bool {
ok, err := dsa.DB.Has(key)
if err != nil {
panic(err)
}
return ok
}
// Set wraps the underlying DB's Set method, panicking on error.
func (dsa Store) Set(key, value []byte) {
types.AssertValidKey(key)
if err := dsa.DB.Set(key, value); err != nil {
panic(err)
}
}
// Delete wraps the underlying DB's Delete method, panicking on error.
func (dsa Store) Delete(key []byte) {
if err := dsa.DB.Delete(key); err != nil {
panic(err)
}
}
// Iterator wraps the underlying DB's Iterator method, panicking on error.
func (dsa Store) Iterator(start, end []byte) types.Iterator {
iter, err := dsa.DB.Iterator(start, end)
if err != nil {
panic(err)
}
return dbutil.ToStoreIterator(iter)
}
// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking on error.
func (dsa Store) ReverseIterator(start, end []byte) types.Iterator {
iter, err := dsa.DB.ReverseIterator(start, end)
if err != nil {
panic(err)
}
return dbutil.ToStoreIterator(iter)
}
// GetStoreType returns the type of the store.
func (dsa Store) GetStoreType() types.StoreType {
return types.StoreTypeDB
}
// CacheWrap branches the underlying store.
func (dsa Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(dsa)
}
// CacheWrapWithTrace implements KVStore.
func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(dsa, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (dsa Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(dsa, storeKey, listeners))
}
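
For context, a minimal sketch of this adapter in use, assuming the memdb backend (`github.com/cosmos/cosmos-sdk/db/memdb`) that the tests below also use:

store := dbadapter.Store{DB: memdb.NewDB().ReadWriter()}
store.Set([]byte("hello"), []byte("world")) // panics if the backend errors
value := store.Get([]byte("hello"))         // returns []byte("world")
iter := store.Iterator(nil, nil)            // iterate the full key range
defer iter.Close()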


@@ -1,105 +0,0 @@
package dbadapter_test
import (
"bytes"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/cachekv"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/dbadapter"
mocks "github.com/cosmos/cosmos-sdk/testutil/mock/db"
)
var errFoo = errors.New("dummy")
func TestAccessors(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockDB := mocks.NewMockReadWriter(mockCtrl)
store := dbadapter.Store{mockDB}
key := []byte("test")
value := []byte("testvalue")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Equal(t, types.StoreTypeDB, store.GetStoreType())
retFoo := []byte("xxx")
mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil)
require.True(t, bytes.Equal(retFoo, store.Get(key)))
require.Equal(t, []byte{1, 2, 3}, []byte{1, 2, 3})
mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.Get(key) })
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil)
require.True(t, store.Has(key))
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil)
require.False(t, store.Has(key))
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo)
require.Panics(t, func() { store.Has(key) })
mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil)
require.NotPanics(t, func() { store.Set(key, value) })
mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo)
require.Panics(t, func() { store.Set(key, value) })
mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil)
require.NotPanics(t, func() { store.Delete(key) })
mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo)
require.Panics(t, func() { store.Delete(key) })
}
func TestIterators(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockDB := mocks.NewMockReadWriter(mockCtrl)
store := dbadapter.Store{mockDB}
key := []byte("test")
value := []byte("testvalue")
start, end := key, []byte("test_end")
mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.Iterator(start, end) })
mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.ReverseIterator(start, end) })
mockIter := mocks.NewMockIterator(mockCtrl)
mockIter.EXPECT().Next().Times(1).Return(true)
mockIter.EXPECT().Key().Times(1).Return(key)
mockIter.EXPECT().Value().Times(1).Return(value)
mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(mockIter, nil)
iter := store.Iterator(start, end)
require.Equal(t, key, iter.Key())
require.Equal(t, value, iter.Value())
}
func TestCacheWraps(t *testing.T) {
mockCtrl := gomock.NewController(t)
mockDB := mocks.NewMockReadWriter(mockCtrl)
store := dbadapter.Store{mockDB}
cacheWrapper := store.CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}


@@ -1,45 +0,0 @@
package mem
import (
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/dbadapter"
)
var (
_ types.KVStore = (*Store)(nil)
_ types.Committer = (*Store)(nil)
)
// Store implements an in-memory-only KVStore. Entries are persisted between
// commits and thus between blocks. State in a Memory store is not committed as
// part of app state, but is maintained privately by each node.
type Store struct {
dbadapter.Store
conn dbm.Connection
}
// NewStore constructs a new in-memory store.
func NewStore() *Store {
db := memdb.NewDB()
return &Store{
Store: dbadapter.Store{DB: db.ReadWriter()},
conn: db,
}
}
// GetStoreType returns the Store's type.
func (s Store) GetStoreType() types.StoreType {
return types.StoreTypeMemory
}
// Commit commits to the underlying DB.
func (s *Store) Commit() (id types.CommitID) {
return
}
func (s *Store) SetPruning(pruningtypes.PruningOptions) {}
func (s *Store) GetPruning() pruningtypes.PruningOptions { return pruningtypes.PruningOptions{} }
func (s Store) LastCommitID() (id types.CommitID) { return }


@@ -1,39 +0,0 @@
package mem_test
import (
"testing"
"github.com/stretchr/testify/require"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/mem"
)
func TestStore(t *testing.T) {
store := mem.NewStore()
key, value := []byte("key"), []byte("value")
require.Equal(t, types.StoreTypeMemory, store.GetStoreType())
require.Nil(t, store.Get(key))
store.Set(key, value)
require.Equal(t, value, store.Get(key))
newValue := []byte("newValue")
store.Set(key, newValue)
require.Equal(t, newValue, store.Get(key))
store.Delete(key)
require.Nil(t, store.Get(key))
}
func TestCommit(t *testing.T) {
store := mem.NewStore()
key, value := []byte("key"), []byte("value")
store.Set(key, value)
id := store.Commit()
require.True(t, id.IsZero())
require.True(t, store.LastCommitID().IsZero())
require.Equal(t, value, store.Get(key))
}


@@ -1,36 +0,0 @@
package multi
import (
"github.com/cosmos/cosmos-sdk/store/cachekv"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
)
// GetKVStore implements BasicMultiStore.
func (cs *cacheStore) GetKVStore(skey types.StoreKey) types.KVStore {
key := skey.Name()
sub, has := cs.substores[key]
if !has {
sub = cachekv.NewStore(cs.source.GetKVStore(skey))
cs.substores[key] = sub
}
// Wrap with trace/listen if needed. Note: we don't cache this, so users must get a new substore after
// modifying tracers/listeners.
return cs.wrapTraceListen(sub, skey)
}
// Write implements CacheMultiStore.
func (cs *cacheStore) Write() {
for _, sub := range cs.substores {
sub.Write()
}
}
// CacheMultiStore implements BasicMultiStore.
// This recursively wraps the CacheMultiStore in another cache store.
func (cs *cacheStore) CacheMultiStore() types.CacheMultiStore {
return &cacheStore{
source: cs,
substores: map[string]types.CacheKVStore{},
traceListenMixin: newTraceListenMixin(),
}
}
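
A sketch of the branch-then-flush pattern this type enables; `store` (the parent store, e.g. the *Store defined in store.go below) and `skey` (a registered store key) are illustrative:

cms := store.CacheMultiStore()   // branch the source store
kv := cms.GetKVStore(skey)       // lazily wraps the source substore in a cachekv.Store
kv.Set([]byte("k"), []byte("v")) // staged in the cache, invisible to the source
cms.Write()                      // flush all staged writes back to the source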


@@ -1,19 +0,0 @@
// This package provides concrete implementations of the store/v2alpha1 "MultiStore" types, including
// CommitMultiStore, CacheMultiStore, and BasicMultiStore (as read-only stores at past versions).
//
// Substores are declared as part of a schema within StoreOptions.
// The schema cannot be changed once a CommitMultiStore is initialized, and changes to the schema must be done
// by migrating via StoreOptions.Upgrades. If a past version is accessed, it will be loaded with the past schema.
// Stores may be declared as StoreTypePersistent, StoreTypeMemory (not persisted after close), or
// StoreTypeTransient (not persisted across commits). Non-persistent substores cannot be migrated or accessed
// in past versions.
//
// A declared persistent substore is initially empty and stores nothing in the backing DB until a value is set.
// A non-empty store is stored within a prefixed subdomain of the backing DB (using db/prefix).
// If the MultiStore is configured to use a separate Connection for StateCommitmentDB, it will store the
// state commitment (SC) store (as an SMT) in subdomains there, and the "flat" state is stored in the main DB.
// Each substore's SC is allocated as an independent SMT, and query proofs contain two components: a proof
// of a key's (non)existence within the substore SMT, and a proof of the substore's existence within the
// MultiStore (using the Merkle map proof spec (TendermintSpec)).
package multi
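
To make the schema rules above concrete, a minimal sketch of declaring substores and opening a store; the memdb backend and the store names are illustrative, `types` aliases store/v2alpha1 as elsewhere in this diff, and error handling is abbreviated:

opts := multi.DefaultStoreConfig()
// The schema is fixed once the store is initialized; later changes require StoreOptions.Upgrades.
if err := opts.RegisterSubstore("bank", types.StoreTypePersistent); err != nil {
	return err
}
if err := opts.RegisterSubstore("mem", types.StoreTypeMemory); err != nil {
	return err
}
store, err := multi.NewStore(memdb.NewDB(), opts)
if err != nil {
	return err
}
defer store.Close()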


@@ -1,78 +0,0 @@
package multi
import (
"sort"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/mem"
v1Store "github.com/cosmos/cosmos-sdk/store/rootmulti"
"github.com/cosmos/cosmos-sdk/store/transient"
"github.com/cosmos/cosmos-sdk/store/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// MigrateFromV1 migrates state from the IAVL-based store (v1) to the SMT-based store (v2)
func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.Connection, storeConfig StoreConfig) (*Store, error) {
type namedStore struct {
*iavl.Store
name string
}
storeKeysByName := rootMultiStore.StoreKeysByName()
keys := make([]string, 0, len(storeKeysByName))
for key := range storeKeysByName {
keys = append(keys, key)
}
sort.Strings(keys)
var stores []namedStore
for _, key := range keys {
storeKey := storeKeysByName[key]
keyName := storeKey.Name()
switch store := rootMultiStore.GetStoreByName(keyName).(type) {
case *iavl.Store:
err := storeConfig.RegisterSubstore(keyName, types.StoreTypePersistent)
if err != nil {
return nil, err
}
stores = append(stores, namedStore{name: keyName, Store: store})
case *transient.Store, *mem.Store:
continue
default:
return nil, sdkerrors.Wrapf(sdkerrors.ErrLogic, "don't know how to migrate store %q of type %T", keyName, store)
}
}
// create the new store backed by an smt tree
rootStore, err := NewStore(store2db, storeConfig)
if err != nil {
return nil, err
}
// if version is 0 there is no state data to commit
if rootMultiStore.LastCommitID().Version == 0 {
return rootStore, nil
}
// iterate through the rootmulti stores and save the key/values into the smt tree
for _, store := range stores {
subStore, err := rootStore.getSubstore(store.name)
if err != nil {
return nil, err
}
// iterate all iavl tree node key/values
iterator := store.Iterator(nil, nil)
for ; iterator.Valid(); iterator.Next() {
// set the iavl key/values into the smt store
subStore.Set(iterator.Key(), iterator.Value())
}
}
// commit all key/values migrated from iavl to the smt tree (SMT Store)
_, err = rootStore.commit(uint64(rootMultiStore.LastCommitID().Version))
if err != nil {
return nil, err
}
return rootStore, nil
}
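
A sketch of invoking this migration, mirroring the tests below; `v1Store` (an existing *rootmulti.Store) and `db2` (an empty target dbm.Connection) are assumptions for illustration:

v2Store, err := MigrateFromV1(v1Store, db2, DefaultStoreConfig())
if err != nil {
	return err
}
// v2Store now contains the IAVL state re-committed under SMTs at the same version.
defer v2Store.Close()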


@@ -1,107 +0,0 @@
package multi
import (
"encoding/binary"
"fmt"
"math/rand"
"testing"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/iavl"
"github.com/cosmos/cosmos-sdk/store/rootmulti"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
)
func TestMigrationV2(t *testing.T) {
r := rand.New(rand.NewSource(49872768940))
// setup a rootmulti store
db := dbm.NewMemDB()
v1Store := rootmulti.NewStore(db, log.NewNopLogger())
// mount the kvStores
var keys []*types.KVStoreKey
for i := uint8(0); i < 10; i++ {
key := types.NewKVStoreKey(fmt.Sprintf("store%v", i))
v1Store.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
keys = append(keys, key)
}
err := v1Store.LoadLatestVersion()
require.Nil(t, err)
// set up random test data
for _, key := range keys {
store := v1Store.GetStore(key).(*iavl.Store)
store.Set([]byte("temp_data"), []byte("one"))
for i := 0; i < len(keys); i++ {
k := make([]byte, 8)
v := make([]byte, 1024)
binary.BigEndian.PutUint64(k, uint64(i))
_, err := r.Read(v)
if err != nil {
panic(err)
}
store.Set(k, v)
}
}
testCases := []struct {
testName string
emptyStore bool
}{
{
"Migration With Empty Store",
true,
},
{
"Migration From Root Multi Store (IAVL) to SMT ",
false,
},
}
for _, testCase := range testCases {
if !testCase.emptyStore {
v1Store.Commit()
}
// setup a new root store of smt
db2 := memdb.NewDB()
storeConfig := DefaultStoreConfig()
// migrating the iavl store (v1) to smt store (v2)
v2Store, err := MigrateFromV1(v1Store, db2, storeConfig)
require.NoError(t, err)
for _, key := range keys {
v2StoreKVStore := v2Store.GetKVStore(key)
if testCase.emptyStore {
// check the empty store
require.Nil(t, v2StoreKVStore.Get([]byte("temp_data")))
} else {
require.Equal(t, v2StoreKVStore.Get([]byte("temp_data")), []byte("one"))
}
require.Equal(t, v2Store.LastCommitID().Version, v1Store.LastCommitID().Version)
}
err = v2Store.Close()
require.NoError(t, err)
}
}
// TestMigrateV2ForEmptyStore checks migration of an empty store
func TestMigrateV2ForEmptyStore(t *testing.T) {
// setup a rootmulti store
db := dbm.NewMemDB()
v1Store := rootmulti.NewStore(db, log.NewNopLogger())
err := v1Store.LoadLatestVersion()
require.Nil(t, err)
db2 := memdb.NewDB()
storeConfig := DefaultStoreConfig()
// migrating the iavl store (v1) to smt store (v2)
v2Store, err := MigrateFromV1(v1Store, db2, storeConfig)
require.NoError(t, err)
require.Equal(t, v2Store.LastCommitID(), v1Store.LastCommitID())
}


@@ -1,52 +0,0 @@
package multi
import (
"crypto/sha256"
"github.com/tendermint/tendermint/crypto/merkle"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
)
// DefaultProofRuntime returns a ProofRuntime supporting SMT and simple merkle proofs.
func DefaultProofRuntime() (prt *merkle.ProofRuntime) {
prt = merkle.NewProofRuntime()
prt.RegisterOpDecoder(types.ProofOpSMTCommitment, types.CommitmentOpDecoder)
prt.RegisterOpDecoder(types.ProofOpSimpleMerkleCommitment, types.CommitmentOpDecoder)
return prt
}
// proveKey proves the commitment of a key within an smt store and returns the ProofOps
func proveKey(s *smt.Store, key []byte) (*tmcrypto.ProofOps, error) {
var ret tmcrypto.ProofOps
keyProof, err := s.GetProofICS23(key)
if err != nil {
return nil, err
}
hkey := sha256.Sum256(key)
ret.Ops = append(ret.Ops, types.NewSmtCommitmentOp(hkey[:], keyProof).ProofOp())
return &ret, nil
}
// GetProof returns ProofOps containing: a proof for the given key within this substore;
// and a proof of the substore's existence within the MultiStore.
func (s *viewSubstore) GetProof(key []byte) (*tmcrypto.ProofOps, error) {
ret, err := proveKey(s.stateCommitmentStore, key)
if err != nil {
return nil, err
}
// Prove commitment of substore within root store
storeHashes, err := s.root.getMerkleRoots()
if err != nil {
return nil, err
}
storeProof, err := types.ProofOpFromMap(storeHashes, s.name)
if err != nil {
return nil, err
}
ret.Ops = append(ret.Ops, storeProof)
return ret, nil
}


@@ -1,125 +0,0 @@
package multi
import (
"crypto/sha256"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
)
// Keys are hashed to produce SMT paths, so reflect that here
func keyPath(prefix, key string) string {
hashed := sha256.Sum256([]byte(key))
return prefix + string(hashed[:])
}
func TestVerifySMTStoreProof(t *testing.T) {
// Create main tree for testing.
txn := memdb.NewDB().ReadWriter()
store := smt.NewStore(txn)
store.Set([]byte("MYKEY"), []byte("MYVALUE"))
root := store.Root()
res, err := proveKey(store, []byte("MYKEY"))
require.NoError(t, err)
// Verify good proof.
prt := DefaultProofRuntime()
err = prt.VerifyValue(res, root, keyPath("/", "MYKEY"), []byte("MYVALUE"))
require.NoError(t, err)
// Fail to verify bad proofs.
err = prt.VerifyValue(res, root, keyPath("/", "MYKEY_NOT"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res, root, keyPath("/", "MYKEY/MYKEY"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res, root, keyPath("", "MYKEY"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res, root, keyPath("/", "MYKEY"), []byte("MYVALUE_NOT"))
require.Error(t, err)
err = prt.VerifyValue(res, root, keyPath("/", "MYKEY"), []byte(nil))
require.Error(t, err)
}
func TestVerifyMultiStoreQueryProof(t *testing.T) {
db := memdb.NewDB()
store, err := NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
substore := store.GetKVStore(skey_1)
substore.Set([]byte("MYKEY"), []byte("MYVALUE"))
cid := store.Commit()
res := store.Query(abci.RequestQuery{
Path: "/store1/key", // required path to get key/value+proof
Data: []byte("MYKEY"),
Prove: true,
})
require.NotNil(t, res.ProofOps)
// Verify good proofs.
prt := DefaultProofRuntime()
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYKEY"), []byte("MYVALUE"))
require.NoError(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYKEY"), []byte("MYVALUE"))
require.NoError(t, err)
// Fail to verify bad proofs.
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYKEY_NOT"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/MYKEY/", "MYKEY"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("store1/", "MYKEY"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/", "MYKEY"), []byte("MYVALUE"))
require.Error(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYKEY"), []byte("MYVALUE_NOT"))
require.Error(t, err)
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYKEY"), []byte(nil))
require.Error(t, err)
}
func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) {
db := memdb.NewDB()
store, err := NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
substore := store.GetKVStore(skey_1)
substore.Set([]byte("MYKEY"), []byte("MYVALUE"))
cid := store.Commit()
res := store.Query(abci.RequestQuery{
Path: "/store1/key", // required path to get key/value+proof
Data: []byte("MYABSENTKEY"),
Prove: true,
})
require.NotNil(t, res.ProofOps)
// Verify good proof.
prt := DefaultProofRuntime()
err = prt.VerifyAbsence(res.ProofOps, cid.Hash, keyPath("/store1/", "MYABSENTKEY"))
require.NoError(t, err)
// Fail to verify bad proofs.
prt = DefaultProofRuntime()
err = prt.VerifyAbsence(res.ProofOps, cid.Hash, keyPath("/", "MYABSENTKEY"))
require.Error(t, err)
prt = DefaultProofRuntime()
err = prt.VerifyValue(res.ProofOps, cid.Hash, keyPath("/store1/", "MYABSENTKEY"), []byte(""))
require.Error(t, err)
}


@@ -1,172 +0,0 @@
package multi
import (
"bytes"
"fmt"
"io"
"sort"
protoio "github.com/cosmos/gogoproto/io"
"github.com/cosmos/cosmos-sdk/snapshots"
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// Snapshot implements snapshottypes.Snapshotter.
func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error {
if height == 0 {
return snapshottypes.ErrInvalidSnapshotVersion
}
if height > uint64(rs.LastCommitID().Version) {
return sdkerrors.Wrapf(sdkerrors.ErrLogic, "cannot snapshot future height %v", height)
}
// get a read-only view of the state at the given height
vs, err := rs.getView(int64(height))
if err != nil {
return sdkerrors.Wrap(err, fmt.Sprintf("error while getting the version at height %d", height))
}
// send the snapshot store schema first
var storeByteKeys [][]byte
for sKey := range vs.schema {
if vs.schema[sKey] == storetypes.StoreTypePersistent {
storeByteKeys = append(storeByteKeys, []byte(sKey))
}
}
sort.Slice(storeByteKeys, func(i, j int) bool {
return bytes.Compare(storeByteKeys[i], storeByteKeys[j]) == -1
})
err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_Schema{
Schema: &snapshottypes.SnapshotSchema{
Keys: storeByteKeys,
},
},
})
if err != nil {
return err
}
for _, sKey := range storeByteKeys {
subStore, err := vs.getSubstore(string(sKey))
if err != nil {
return err
}
err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_Store{
Store: &snapshottypes.SnapshotStoreItem{
Name: string(sKey),
},
},
})
if err != nil {
return err
}
iter := subStore.Iterator(nil, nil)
for ; iter.Valid(); iter.Next() {
err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_KV{
KV: &snapshottypes.SnapshotKVItem{
Key: iter.Key(),
Value: iter.Value(),
},
},
})
if err != nil {
return err
}
}
err = iter.Close()
if err != nil {
return err
}
}
return nil
}
// Restore implements snapshottypes.Snapshotter.
func (rs *Store) Restore(
height uint64, format uint32, protoReader protoio.Reader,
) (snapshottypes.SnapshotItem, error) {
if err := snapshots.ValidRestoreHeight(format, height); err != nil {
return snapshottypes.SnapshotItem{}, err
}
if rs.LastCommitID().Version != 0 {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "cannot restore snapshot for non empty store at height %v", height)
}
var subStore *substore
storeSchemaReceived := false
var snapshotItem snapshottypes.SnapshotItem
loop:
for {
snapshotItem = snapshottypes.SnapshotItem{}
err := protoReader.ReadMsg(&snapshotItem)
if err == io.EOF {
break
} else if err != nil {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, "invalid protobuf message")
}
switch item := snapshotItem.Item.(type) {
case *snapshottypes.SnapshotItem_Schema:
receivedStoreSchema := make(StoreSchema, len(item.Schema.GetKeys()))
storeSchemaReceived = true
for _, sKey := range item.Schema.GetKeys() {
receivedStoreSchema[string(sKey)] = types.StoreTypePersistent
}
if !rs.schema.equal(receivedStoreSchema) {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(sdkerrors.ErrLogic, "received schema does not match app schema")
}
case *snapshottypes.SnapshotItem_Store:
storeName := item.Store.GetName()
// check that the store schema has been received
if !storeSchemaReceived {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "received store name before store schema %s", storeName)
}
// check that the store exists in the schema
if _, has := rs.schema[storeName]; !has {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "store is missing from schema %s", storeName)
}
// get the substore
subStore, err = rs.getSubstore(storeName)
if err != nil {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(err, fmt.Sprintf("error while getting the substore for key %s", storeName))
}
case *snapshottypes.SnapshotItem_KV:
if subStore == nil {
return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(sdkerrors.ErrLogic, "received KV Item before store item")
}
// write the key/value pair into the substore
subStore.Set(item.KV.Key, item.KV.Value)
default:
break loop
}
}
// commit all key/values to the store
_, err := rs.commit(height)
if err != nil {
return snapshotItem, sdkerrors.Wrap(err, fmt.Sprintf("error while committing the store at height %d", height))
}
return snapshotItem, nil
}


@@ -1,317 +0,0 @@
package multi
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
"github.com/cosmos/cosmos-sdk/snapshots"
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
"github.com/cosmos/cosmos-sdk/store/types"
)
func multiStoreConfig(t *testing.T, stores int) StoreConfig {
opts := DefaultStoreConfig()
opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
for i := 0; i < stores; i++ {
sKey := types.NewKVStoreKey(fmt.Sprintf("store%d", i))
require.NoError(t, opts.RegisterSubstore(sKey.Name(), types.StoreTypePersistent))
}
return opts
}
func newMultiStoreWithGeneratedData(t *testing.T, db dbm.Connection, stores int, storeKeys uint64) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)
r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests
var sKeys []string
for sKey := range store.schema {
sKeys = append(sKeys, sKey)
}
sort.Slice(sKeys, func(i, j int) bool {
return strings.Compare(sKeys[i], sKeys[j]) == -1
})
for _, sKey := range sKeys {
sStore, err := store.getSubstore(sKey)
require.NoError(t, err)
for i := uint64(0); i < storeKeys; i++ {
k := make([]byte, 8)
v := make([]byte, 1024)
binary.BigEndian.PutUint64(k, i)
_, err := r.Read(v)
if err != nil {
panic(err)
}
sStore.Set(k, v)
}
}
store.Commit()
return store
}
func newMultiStoreWithBasicData(t *testing.T, db dbm.Connection, stores int) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)
for sKey := range store.schema {
sStore, err := store.getSubstore(sKey)
require.NoError(t, err)
for k, v := range alohaData {
sStore.Set([]byte(k), []byte(v))
}
}
store.Commit()
return store
}
func newMultiStore(t *testing.T, db dbm.Connection, stores int) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)
return store
}
func TestMultistoreSnapshot_Errors(t *testing.T) {
store := newMultiStoreWithBasicData(t, memdb.NewDB(), 4)
testcases := map[string]struct {
height uint64
expectType error
}{
"0 height": {0, snapshottypes.ErrInvalidSnapshotVersion},
"1 height": {1, nil},
}
for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) {
chunks := make(chan io.ReadCloser)
streamWriter := snapshots.NewStreamWriter(chunks)
err := store.Snapshot(tc.height, streamWriter)
if tc.expectType != nil {
assert.True(t, errors.Is(err, tc.expectType))
}
})
}
}
func TestMultistoreRestore_Errors(t *testing.T) {
store := newMultiStoreWithBasicData(t, memdb.NewDB(), 4)
testcases := map[string]struct {
height uint64
format uint32
expectErrorType error
}{
"0 height": {0, snapshottypes.CurrentFormat, nil},
"0 format": {1, 0, snapshottypes.ErrUnknownFormat},
"unknown format": {1, 9, snapshottypes.ErrUnknownFormat},
}
for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) {
_, err := store.Restore(tc.height, tc.format, nil)
require.Error(t, err)
if tc.expectErrorType != nil {
assert.True(t, errors.Is(err, tc.expectErrorType))
}
})
}
}
func TestMultistoreSnapshot_Checksum(t *testing.T) {
store := newMultiStoreWithGeneratedData(t, memdb.NewDB(), 5, 10000)
version := uint64(store.LastCommitID().Version)
testcases := []struct {
format uint32
chunkHashes []string
}{
{1, []string{
"b0635a30d94d56b6cd1073fbfa109fa90b194d0ff2397659b00934c844a1f6fb",
"8c32e05f312cf2dee6b7d2bdb41e1a2bb2372697f25504e676af1718245d8b63",
"05dfef0e32c34ef3900300f9de51f228d7fb204fa8f4e4d0d1529f083d122029",
"77d30aeeb427b0bdcedf3639adde1e822c15233d652782e171125280875aa492",
"c00c3801da889ea4370f0e647ffe1e291bd47f500e2a7269611eb4cc198b993f",
"6d565eb28776631f3e3e764decd53436c3be073a8a01fa5434afd539f9ae6eda",
}},
}
for _, tc := range testcases {
tc := tc
t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) {
chunks := make(chan io.ReadCloser, 100)
hashes := []string{}
go func() {
streamWriter := snapshots.NewStreamWriter(chunks)
defer streamWriter.Close()
require.NotNil(t, streamWriter)
err := store.Snapshot(version, streamWriter)
require.NoError(t, err)
}()
hasher := sha256.New()
for chunk := range chunks {
hasher.Reset()
_, err := io.Copy(hasher, chunk)
require.NoError(t, err)
hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil)))
}
assert.Equal(t, tc.chunkHashes, hashes, "Snapshot output for format %v has changed", tc.format)
})
}
}
func TestMultistoreSnapshotRestore(t *testing.T) {
source := newMultiStoreWithGeneratedData(t, memdb.NewDB(), 3, 4)
target := newMultiStore(t, memdb.NewDB(), 3)
require.Equal(t, source.LastCommitID().Version, int64(1))
version := uint64(source.LastCommitID().Version)
// the target store must start empty (version 0) before restore
require.Equal(t, target.LastCommitID().Version, int64(0))
dummyExtensionItem := snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_Extension{
Extension: &snapshottypes.SnapshotExtensionMeta{
Name: "test",
Format: 1,
},
},
}
chunks := make(chan io.ReadCloser, 100)
go func() {
streamWriter := snapshots.NewStreamWriter(chunks)
require.NotNil(t, streamWriter)
defer streamWriter.Close()
err := source.Snapshot(version, streamWriter)
require.NoError(t, err)
// write an extension metadata item
err = streamWriter.WriteMsg(&dummyExtensionItem)
require.NoError(t, err)
}()
streamReader, err := snapshots.NewStreamReader(chunks)
require.NoError(t, err)
nextItem, err := target.Restore(version, snapshottypes.CurrentFormat, streamReader)
require.NoError(t, err)
require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension())
assert.Equal(t, source.LastCommitID(), target.LastCommitID())
for sKey := range source.schema {
sourceSubStore, err := source.getSubstore(sKey)
require.NoError(t, err)
targetSubStore, err := target.getSubstore(sKey)
require.NoError(t, err)
require.Equal(t, sourceSubStore, targetSubStore)
}
// check that restoring a snapshot into a store with a different schema fails
target3 := newMultiStore(t, memdb.NewDB(), 4)
chunks3 := make(chan io.ReadCloser, 100)
go func() {
streamWriter3 := snapshots.NewStreamWriter(chunks3)
require.NotNil(t, streamWriter3)
defer streamWriter3.Close()
err := source.Snapshot(version, streamWriter3)
require.NoError(t, err)
}()
streamReader3, err := snapshots.NewStreamReader(chunks3)
require.NoError(t, err)
_, err = target3.Restore(version, snapshottypes.CurrentFormat, streamReader3)
require.Error(t, err)
}
func BenchmarkMultistoreSnapshot100K(b *testing.B) {
benchmarkMultistoreSnapshot(b, 10, 10000)
}
func BenchmarkMultistoreSnapshot1M(b *testing.B) {
benchmarkMultistoreSnapshot(b, 10, 100000)
}
func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) {
benchmarkMultistoreSnapshotRestore(b, 10, 10000)
}
func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) {
benchmarkMultistoreSnapshotRestore(b, 10, 100000)
}
func benchmarkMultistoreSnapshot(b *testing.B, stores int, storeKeys uint64) {
b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
b.ReportAllocs()
b.StopTimer()
source := newMultiStoreWithGeneratedData(nil, memdb.NewDB(), stores, storeKeys)
version := source.LastCommitID().Version
require.EqualValues(b, 1, version)
b.StartTimer()
for i := 0; i < b.N; i++ {
target := newMultiStore(nil, memdb.NewDB(), stores)
require.EqualValues(b, 0, target.LastCommitID().Version)
chunks := make(chan io.ReadCloser)
go func() {
streamWriter := snapshots.NewStreamWriter(chunks)
require.NotNil(b, streamWriter)
err := source.Snapshot(uint64(version), streamWriter)
require.NoError(b, err)
}()
for reader := range chunks {
_, err := io.Copy(io.Discard, reader)
require.NoError(b, err)
err = reader.Close()
require.NoError(b, err)
}
}
}
func benchmarkMultistoreSnapshotRestore(b *testing.B, stores int, storeKeys uint64) {
b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
b.ReportAllocs()
b.StopTimer()
source := newMultiStoreWithGeneratedData(nil, memdb.NewDB(), stores, storeKeys)
version := uint64(source.LastCommitID().Version)
require.EqualValues(b, 1, version)
b.StartTimer()
for i := 0; i < b.N; i++ {
target := newMultiStore(nil, memdb.NewDB(), stores)
require.EqualValues(b, 0, target.LastCommitID().Version)
chunks := make(chan io.ReadCloser)
go func() {
writer := snapshots.NewStreamWriter(chunks)
require.NotNil(b, writer)
err := source.Snapshot(version, writer)
require.NoError(b, err)
}()
reader, err := snapshots.NewStreamReader(chunks)
require.NoError(b, err)
_, err = target.Restore(version, snapshottypes.CurrentFormat, reader)
require.NoError(b, err)
require.Equal(b, source.LastCommitID(), target.LastCommitID())
}
}


@@ -1,921 +0,0 @@
package multi
import (
"errors"
"fmt"
"io"
"math"
"sort"
"strings"
"sync"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/cosmos/cosmos-sdk/db"
prefixdb "github.com/cosmos/cosmos-sdk/db/prefix"
util "github.com/cosmos/cosmos-sdk/internal"
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
sdkmaps "github.com/cosmos/cosmos-sdk/store/internal/maps"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/prefix"
"github.com/cosmos/cosmos-sdk/store/tracekv"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/mem"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/transient"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
_ types.Queryable = (*Store)(nil)
_ types.CommitMultiStore = (*Store)(nil)
_ types.CacheMultiStore = (*cacheStore)(nil)
_ types.BasicMultiStore = (*viewStore)(nil)
_ types.KVStore = (*substore)(nil)
)
var (
// Root prefixes
merkleRootKey = []byte{0} // Key for root hash of namespace tree
schemaPrefix = []byte{1} // Prefix for store keys (namespaces)
contentPrefix = []byte{2} // Prefix for store contents
// Per-substore prefixes
substoreMerkleRootKey = []byte{0} // Key for root hashes of Merkle trees
dataPrefix = []byte{1} // Prefix for state mappings
indexPrefix = []byte{2} // Prefix for Store reverse index
smtPrefix = []byte{3} // Prefix for SMT data
ErrVersionDoesNotExist = errors.New("version does not exist")
ErrMaximumHeight = errors.New("maximum block height reached")
)
func ErrStoreNotFound(skey string) error {
return fmt.Errorf("store does not exist for key: %s", skey)
}
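// Illustration (an assumption about the prefix DB, which is taken here to
// concatenate prefixes onto keys): a data entry for persistent substore
// "store1" with key "k" lands in the backing DB at roughly
// contentPrefix + "store1" + dataPrefix + "k", i.e. {2} || "store1" || {1} || "k".
// See substorePrefix and getSubstore below for how these buckets are nested.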
// StoreConfig is used to define a schema and other options and pass them to the MultiStore constructor.
type StoreConfig struct {
// Version pruning options for backing DBs.
Pruning pruningtypes.PruningOptions
// The minimum allowed version number.
InitialVersion uint64
// The backing DB to use for the state commitment Merkle tree data.
// If nil, Merkle data is stored in the state storage DB under a separate prefix.
StateCommitmentDB dbm.Connection
prefixRegistry
PersistentCache types.MultiStorePersistentCache
Upgrades []types.StoreUpgrades
*traceListenMixin
}
// StoreSchema defines a mapping of substore keys to store types
type StoreSchema map[string]types.StoreType
// Store is the main persistent store type implementing CommitMultiStore.
// Substores consist of an SMT-based state commitment store and state storage.
// Substores must be reserved in the StoreConfig or defined as part of a StoreUpgrade in order to be valid.
// Note:
// The state commitment data and proof are structured in the same basic pattern as the MultiStore, but use an SMT rather than IAVL tree:
// * The state commitment store of each substore consists of an independent SMT.
// * The state commitment of the root store consists of a Merkle map of all registered persistent substore names to the root hash of their corresponding SMTs
type Store struct {
stateDB dbm.Connection
stateTxn dbm.ReadWriter
StateCommitmentDB dbm.Connection
stateCommitmentTxn dbm.ReadWriter
schema StoreSchema
mem *mem.Store
tran *transient.Store
mtx sync.RWMutex
// Copied from StoreConfig
Pruning pruningtypes.PruningOptions
InitialVersion uint64
*traceListenMixin
PersistentCache types.MultiStorePersistentCache
substoreCache map[string]*substore
}
type substore struct {
root *Store
name string
dataBucket dbm.ReadWriter
indexBucket dbm.ReadWriter
stateCommitmentStore *smt.Store
}
// Branched state
type cacheStore struct {
source types.BasicMultiStore
substores map[string]types.CacheKVStore
*traceListenMixin
}
// Read-only store for querying past versions
type viewStore struct {
stateView dbm.Reader
stateCommitmentView dbm.Reader
substoreCache map[string]*viewSubstore
schema StoreSchema
}
type viewSubstore struct {
root *viewStore
name string
dataBucket dbm.Reader
indexBucket dbm.Reader
stateCommitmentStore *smt.Store
}
// Builder type used to create a valid schema with no prefix conflicts
type prefixRegistry struct {
StoreSchema
reserved []string
}
// Mixin type used to compose trace & listen state into each root store variant type
type traceListenMixin struct {
listeners map[string][]types.WriteListener
TraceWriter io.Writer
TraceContext types.TraceContext
}
func newTraceListenMixin() *traceListenMixin {
return &traceListenMixin{listeners: map[string][]types.WriteListener{}}
}
// DefaultStoreConfig returns a MultiStore config with an empty schema, a single backing DB,
// pruning with PruneDefault, no listeners and no tracer.
func DefaultStoreConfig() StoreConfig {
return StoreConfig{
Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault),
prefixRegistry: prefixRegistry{
StoreSchema: StoreSchema{},
},
traceListenMixin: newTraceListenMixin(),
}
}
// Returns true for valid store types for a MultiStore schema
func validSubStoreType(sst types.StoreType) bool {
switch sst {
case types.StoreTypePersistent:
return true
case types.StoreTypeMemory:
return true
case types.StoreTypeTransient:
return true
default:
return false
}
}
// Returns true iff both schema maps match exactly (including mem/tran stores)
func (ss StoreSchema) equal(that StoreSchema) bool {
if len(ss) != len(that) {
return false
}
for key, val := range that {
myval, has := ss[key]
if !has {
return false
}
if val != myval {
return false
}
}
return true
}
// Parses a schema from the DB
func readSavedSchema(bucket dbm.Reader) (*prefixRegistry, error) {
ret := prefixRegistry{StoreSchema: StoreSchema{}}
it, err := bucket.Iterator(nil, nil)
if err != nil {
return nil, err
}
for it.Next() {
value := it.Value()
if len(value) != 1 || !validSubStoreType(types.StoreType(value[0])) {
return nil, fmt.Errorf("invalid mapping for store key: %v => %v", it.Key(), value)
}
ret.StoreSchema[string(it.Key())] = types.StoreType(value[0])
ret.reserved = append(ret.reserved, string(it.Key())) // assume iter yields keys sorted
}
if err = it.Close(); err != nil {
return nil, err
}
return &ret, nil
}
// NewStore constructs a MultiStore directly from a database.
// Creates a new store if no data exists; otherwise loads existing data.
func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) {
versions, err := db.Versions()
if err != nil {
return
}
// If the DB is not empty, attempt to load existing data
if saved := versions.Count(); saved != 0 {
if opts.InitialVersion != 0 && versions.Last() < opts.InitialVersion {
return nil, fmt.Errorf("latest saved version is less than initial version: %v < %v",
versions.Last(), opts.InitialVersion)
}
}
// To abide by atomicity constraints, revert the DB to the last saved version, in case it contains
// committed data in the "working" version.
// This should only happen if Store.Commit previously failed.
err = db.Revert()
if err != nil {
return
}
stateTxn := db.ReadWriter()
defer func() {
if err != nil {
err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
}
}()
stateCommitmentTxn := stateTxn
if opts.StateCommitmentDB != nil {
var scVersions dbm.VersionSet
scVersions, err = opts.StateCommitmentDB.Versions()
if err != nil {
return
}
// Version sets of each DB must match
if !versions.Equal(scVersions) {
err = fmt.Errorf("different version history between Storage and StateCommitment DB ")
return
}
err = opts.StateCommitmentDB.Revert()
if err != nil {
return
}
stateCommitmentTxn = opts.StateCommitmentDB.ReadWriter()
}
ret = &Store{
stateDB: db,
stateTxn: stateTxn,
StateCommitmentDB: opts.StateCommitmentDB,
stateCommitmentTxn: stateCommitmentTxn,
mem: mem.NewStore(),
tran: transient.NewStore(),
substoreCache: map[string]*substore{},
traceListenMixin: opts.traceListenMixin,
PersistentCache: opts.PersistentCache,
Pruning: opts.Pruning,
InitialVersion: opts.InitialVersion,
}
// Now load the substore schema
schemaView := prefixdb.NewReader(ret.stateDB.Reader(), schemaPrefix)
defer func() {
if err != nil {
err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
err = util.CombineErrors(err, ret.Close(), "base.Close also failed")
}
}()
reg, err := readSavedSchema(schemaView)
if err != nil {
return
}
// If the loaded schema is empty (for new store), just copy the config schema;
// Otherwise, verify it is identical to the config schema
if len(reg.StoreSchema) == 0 {
for k, v := range opts.StoreSchema {
reg.StoreSchema[k] = v
}
reg.reserved = make([]string, len(opts.reserved))
copy(reg.reserved, opts.reserved)
} else if !reg.equal(opts.StoreSchema) {
err = errors.New("loaded schema does not match configured schema")
return
}
// Apply migrations, then clear old schema and write the new one
for _, upgrades := range opts.Upgrades {
err = reg.migrate(ret, upgrades)
if err != nil {
return
}
}
schemaWriter := prefixdb.NewWriter(ret.stateTxn, schemaPrefix)
it, err := schemaView.Iterator(nil, nil)
if err != nil {
return
}
for it.Next() {
err = schemaWriter.Delete(it.Key())
if err != nil {
return
}
}
err = it.Close()
if err != nil {
return
}
err = schemaView.Discard()
if err != nil {
return
}
skeys := make([]string, 0, len(reg.StoreSchema))
for skey := range reg.StoreSchema {
skeys = append(skeys, skey)
}
sort.Strings(skeys)
// NB. the migrated contents and schema are not committed until the next store.Commit
for _, skey := range skeys {
typ := reg.StoreSchema[skey]
err = schemaWriter.Set([]byte(skey), []byte{byte(typ)})
if err != nil {
return
}
}
ret.schema = reg.StoreSchema
return ret, err
}
func (s *Store) Close() error {
err := s.stateTxn.Discard()
if s.StateCommitmentDB != nil {
err = util.CombineErrors(err, s.stateCommitmentTxn.Discard(), "stateCommitmentTxn.Discard also failed")
}
return err
}
// Applies store upgrades to the DB contents.
func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) error {
// Get a view of current state to allow mutation while iterating
reader := store.stateDB.Reader()
scReader := reader
if store.StateCommitmentDB != nil {
scReader = store.StateCommitmentDB.Reader()
}
for _, key := range upgrades.Deleted {
sst, ix, err := pr.storeInfo(key)
if err != nil {
return err
}
if sst != types.StoreTypePersistent {
return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", key, sst)
}
pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...)
delete(pr.StoreSchema, key)
pfx := substorePrefix(key)
subReader := prefixdb.NewReader(reader, pfx)
it, err := subReader.Iterator(nil, nil)
if err != nil {
return err
}
for it.Next() {
store.stateTxn.Delete(it.Key())
}
it.Close()
if store.StateCommitmentDB != nil {
subReader = prefixdb.NewReader(scReader, pfx)
it, err = subReader.Iterator(nil, nil)
if err != nil {
return err
}
for it.Next() {
store.stateCommitmentTxn.Delete(it.Key())
}
it.Close()
}
}
for _, rename := range upgrades.Renamed {
sst, ix, err := pr.storeInfo(rename.OldKey)
if err != nil {
return err
}
if sst != types.StoreTypePersistent {
return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", rename.OldKey, sst)
}
pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...)
delete(pr.StoreSchema, rename.OldKey)
err = pr.RegisterSubstore(rename.NewKey, types.StoreTypePersistent)
if err != nil {
return err
}
oldPrefix := substorePrefix(rename.OldKey)
newPrefix := substorePrefix(rename.NewKey)
subReader := prefixdb.NewReader(reader, oldPrefix)
subWriter := prefixdb.NewWriter(store.stateTxn, newPrefix)
it, err := subReader.Iterator(nil, nil)
if err != nil {
return err
}
for it.Next() {
subWriter.Set(it.Key(), it.Value())
}
it.Close()
if store.StateCommitmentDB != nil {
subReader = prefixdb.NewReader(scReader, oldPrefix)
subWriter = prefixdb.NewWriter(store.stateCommitmentTxn, newPrefix)
it, err = subReader.Iterator(nil, nil)
if err != nil {
return err
}
for it.Next() {
subWriter.Set(it.Key(), it.Value())
}
it.Close()
}
}
for _, key := range upgrades.Added {
err := pr.RegisterSubstore(key, types.StoreTypePersistent)
if err != nil {
return err
}
}
return nil
}
func substorePrefix(key string) []byte {
return append(contentPrefix, key...)
}
// GetKVStore implements BasicMultiStore.
func (s *Store) GetKVStore(skey types.StoreKey) types.KVStore {
key := skey.Name()
var parent types.KVStore
typ, has := s.schema[key]
if !has {
panic(ErrStoreNotFound(key))
}
switch typ {
case types.StoreTypeMemory:
parent = s.mem
case types.StoreTypeTransient:
parent = s.tran
case types.StoreTypePersistent:
default:
panic(fmt.Errorf("StoreType not supported: %v", typ)) // should never happen
}
var ret types.KVStore
if parent != nil { // store is non-persistent
ret = prefix.NewStore(parent, []byte(key))
} else { // store is persistent
sub, err := s.getSubstore(key)
if err != nil {
panic(err)
}
s.substoreCache[key] = sub
ret = sub
}
// Wrap with trace/listen if needed. Note: we don't cache this, so users must get a new substore after
// modifying tracers/listeners.
return s.wrapTraceListen(ret, skey)
}
// Gets a persistent substore. This reads, but does not update the substore cache.
// Use it in cases where we need to access a store internally (e.g. read/write Merkle keys, queries)
func (s *Store) getSubstore(key string) (*substore, error) {
if cached, has := s.substoreCache[key]; has {
return cached, nil
}
pfx := substorePrefix(key)
stateRW := prefixdb.NewReadWriter(s.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewReadWriter(s.stateCommitmentTxn, pfx)
var stateCommitmentStore *smt.Store
rootHash, err := stateRW.Get(substoreMerkleRootKey)
if err != nil {
return nil, err
}
if rootHash != nil {
stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash)
} else {
smtdb := prefixdb.NewReadWriter(stateCommitmentRW, smtPrefix)
stateCommitmentStore = smt.NewStore(smtdb)
}
return &substore{
root: s,
name: key,
dataBucket: prefixdb.NewReadWriter(stateRW, dataPrefix),
indexBucket: prefixdb.NewReadWriter(stateRW, indexPrefix),
stateCommitmentStore: stateCommitmentStore,
}, nil
}
// Resets a substore's state after commit (because root stateTxn has been discarded)
func (s *substore) refresh(rootHash []byte) {
pfx := substorePrefix(s.name)
stateRW := prefixdb.NewReadWriter(s.root.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewReadWriter(s.root.stateCommitmentTxn, pfx)
s.dataBucket = prefixdb.NewReadWriter(stateRW, dataPrefix)
s.indexBucket = prefixdb.NewReadWriter(stateRW, indexPrefix)
s.stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash)
}
// Commit implements Committer.
func (s *Store) Commit() types.CommitID {
// Substores read-lock this mutex; lock to prevent racy invalidation of underlying txns
s.mtx.Lock()
defer s.mtx.Unlock()
// Determine the target version
versions, err := s.stateDB.Versions()
if err != nil {
panic(err)
}
target := versions.Last() + 1
if target > math.MaxInt64 {
panic(ErrMaximumHeight)
}
// Fast forward to initial version if needed
if s.InitialVersion != 0 && target < s.InitialVersion {
target = s.InitialVersion
}
cid, err := s.commit(target)
if err != nil {
panic(err)
}
// Prune if necessary
previous := cid.Version - 1
if s.Pruning.Interval != 0 && cid.Version%int64(s.Pruning.Interval) == 0 {
// The range of newly prunable versions
lastPrunable := previous - int64(s.Pruning.KeepRecent)
firstPrunable := lastPrunable - int64(s.Pruning.Interval)
for version := firstPrunable; version <= lastPrunable; version++ {
s.stateDB.DeleteVersion(uint64(version))
if s.StateCommitmentDB != nil {
s.StateCommitmentDB.DeleteVersion(uint64(version))
}
}
}
s.tran.Commit()
return *cid
}
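// Worked example of the pruning window above, with illustrative numbers:
// Interval=10, KeepRecent=10 and a commit landing at version 100 gives
// previous = 99, lastPrunable = 99 - 10 = 89, firstPrunable = 89 - 10 = 79,
// so versions 79 through 89 are deleted on this commit.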
func (s *Store) getMerkleRoots() (ret map[string][]byte, err error) {
ret = map[string][]byte{}
for key := range s.schema {
sub, has := s.substoreCache[key]
if !has {
sub, err = s.getSubstore(key)
if err != nil {
return
}
}
ret[key] = sub.stateCommitmentStore.Root()
}
return
}
// Calculates root hashes and commits to DB. Does not verify target version or perform pruning.
func (s *Store) commit(target uint64) (id *types.CommitID, err error) {
storeHashes, err := s.getMerkleRoots()
if err != nil {
return
}
// Update substore Merkle roots
for key, storeHash := range storeHashes {
pfx := substorePrefix(key)
stateW := prefixdb.NewReadWriter(s.stateTxn, pfx)
if err = stateW.Set(substoreMerkleRootKey, storeHash); err != nil {
return
}
}
rootHash := sdkmaps.HashFromMap(storeHashes)
if err = s.stateTxn.Set(merkleRootKey, rootHash); err != nil {
return
}
if err = s.stateTxn.Commit(); err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, s.stateDB.Revert(), "stateDB.Revert also failed")
}
}()
err = s.stateDB.SaveVersion(target)
if err != nil {
return
}
stateTxn := s.stateDB.ReadWriter()
defer func() {
if err != nil {
err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
}
}()
stateCommitmentTxn := stateTxn
// If DBs are not separate, StateCommitment state has been committed & snapshotted
if s.StateCommitmentDB != nil {
// if any error is encountered henceforth, we must revert the state and SC dbs
defer func() {
if err != nil {
if delerr := s.stateDB.DeleteVersion(target); delerr != nil {
err = fmt.Errorf("%w: commit rollback failed: %v", err, delerr)
}
}
}()
err = s.stateCommitmentTxn.Commit()
if err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, s.StateCommitmentDB.Revert(), "stateCommitmentDB.Revert also failed")
}
}()
err = s.StateCommitmentDB.SaveVersion(target)
if err != nil {
return
}
stateCommitmentTxn = s.StateCommitmentDB.ReadWriter()
}
s.stateTxn = stateTxn
s.stateCommitmentTxn = stateCommitmentTxn
// the state of all live substores must be refreshed
for key, sub := range s.substoreCache {
sub.refresh(storeHashes[key])
}
return &types.CommitID{Version: int64(target), Hash: rootHash}, nil
}
// LastCommitID implements Committer.
func (s *Store) LastCommitID() types.CommitID {
versions, err := s.stateDB.Versions()
if err != nil {
panic(err)
}
last := versions.Last()
if last == 0 {
return types.CommitID{}
}
// Latest Merkle root is the one currently stored
hash, err := s.stateTxn.Get(merkleRootKey)
if err != nil {
panic(err)
}
return types.CommitID{Version: int64(last), Hash: hash}
}
// SetInitialVersion implements CommitMultiStore.
func (s *Store) SetInitialVersion(version uint64) error {
s.InitialVersion = version
return nil
}
// GetVersion implements CommitMultiStore.
func (s *Store) GetVersion(version int64) (types.BasicMultiStore, error) {
return s.getView(version)
}
// CacheMultiStore implements BasicMultiStore.
func (s *Store) CacheMultiStore() types.CacheMultiStore {
return &cacheStore{
source: s,
substores: map[string]types.CacheKVStore{},
traceListenMixin: newTraceListenMixin(),
}
}
// PruneSnapshotHeight prunes the given height according to the prune strategy.
// If PruneNothing, this is a no-op.
// If other strategy, this height is persisted until it is
// less than <current height> - KeepRecent and <current height> % Interval == 0
func (s *Store) PruneSnapshotHeight(height int64) {
panic("not implemented")
}
// SetSnapshotInterval sets the interval at which the snapshots are taken.
// It is used by the store to determine which heights to retain until after the snapshot is complete.
func (s *Store) SetSnapshotInterval(snapshotInterval uint64) {
panic("not implemented")
}
// parsePath expects a format like /<storeName>[/<subpath>]
// Must start with /, subpath may be empty
// Returns error if it doesn't start with /
func parsePath(path string) (storeName string, subpath string, err error) {
if !strings.HasPrefix(path, "/") {
return storeName, subpath, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "invalid path: %s", path)
}
paths := strings.SplitN(path[1:], "/", 2)
storeName = paths[0]
if len(paths) == 2 {
subpath = "/" + paths[1]
}
return storeName, subpath, nil
}
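// For example, with illustrative inputs: parsePath("/store1/key") returns
// ("store1", "/key", nil); parsePath("/store1") returns ("store1", "", nil);
// parsePath("store1/key") fails with ErrUnknownRequest since the leading
// slash is missing.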
// Query implements the ABCI Queryable interface, allowing queries on the store.
//
// By default we return data from (latest height - 1), since Merkle proofs
// are available immediately for it (header height = data height + 1).
// If latest-1 is not present, use latest (which must be present).
// To see the result of a tx at the latest height, you must explicitly
// set the height in the request.
func (s *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
if len(req.Data) == 0 {
return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrTxDecode, "query cannot be zero length"), false)
}
// if height is 0, use the latest height
height := req.Height
if height == 0 {
versions, err := s.stateDB.Versions()
if err != nil {
return sdkerrors.QueryResult(errors.New("failed to get version info"), false)
}
latest := versions.Last()
if versions.Exists(latest - 1) {
height = int64(latest - 1)
} else {
height = int64(latest)
}
}
if height < 0 {
return sdkerrors.QueryResult(fmt.Errorf("height overflow: %v", height), false)
}
res.Height = height
storeName, subpath, err := parsePath(req.Path)
if err != nil {
return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to parse path"), false)
}
view, err := s.getView(height)
if err != nil {
if errors.Is(err, dbm.ErrVersionDoesNotExist) {
err = sdkerrors.ErrInvalidHeight
}
return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access height"), false)
}
if _, has := s.schema[storeName]; !has {
return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName), false)
}
substore, err := view.getSubstore(storeName)
if err != nil {
return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access store: %s", storeName), false)
}
switch subpath {
case "/key":
var err error
res.Key = req.Data // data holds the key bytes
res.Value = substore.Get(res.Key)
if !req.Prove {
break
}
// TODO: actual IBC compatible proof. This is a placeholder so unit tests can pass
res.ProofOps, err = substore.GetProof(res.Key)
if err != nil {
return sdkerrors.QueryResult(fmt.Errorf("merkle proof creation failed for key: %v", res.Key), false)
}
case "/subspace":
res.Key = req.Data // data holds the subspace prefix
pairs := kv.Pairs{
Pairs: make([]kv.Pair, 0),
}
iterator := substore.Iterator(res.Key, types.PrefixEndBytes(res.Key))
for ; iterator.Valid(); iterator.Next() {
pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
}
iterator.Close()
bz, err := pairs.Marshal()
if err != nil {
panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
}
res.Value = bz
default:
return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unexpected query path: %v", req.Path), false)
}
return res
}
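// Illustrative sketch, not part of the original file: issuing a "/subspace"
// query against a hypothetical *Store s; the response Value is a marshaled
// kv.Pairs holding every entry under the given prefix.
//
//	req := abci.RequestQuery{Path: "/store1/subspace", Data: []byte("k")}
//	res := s.Query(req)
//	var pairs kv.Pairs
//	if err := pairs.Unmarshal(res.Value); err != nil {
//		panic(err)
//	}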
func loadSMT(stateCommitmentTxn dbm.ReadWriter, root []byte) *smt.Store {
smtdb := prefixdb.NewReadWriter(stateCommitmentTxn, smtPrefix)
return smt.LoadStore(smtdb, root)
}
// binarySearch returns the index of the closest entry and whether it is an exact match.
func binarySearch(hay []string, ndl string) (int, bool) {
var mid int
from, to := 0, len(hay)-1
for from <= to {
mid = (from + to) / 2
switch strings.Compare(hay[mid], ndl) {
case -1:
from = mid + 1
case 1:
to = mid - 1
default:
return mid, true
}
}
return from, false
}
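// Illustrative sketch, not part of the original file: binarySearch over a
// sorted slice returns the insertion index when the needle is absent.
//
//	hay := []string{"auth", "bank", "staking"}
//	i, found := binarySearch(hay, "bank") // i == 1, found == true
//	i, found = binarySearch(hay, "gov")   // i == 2, found == false
//	                                      // ("gov" would be inserted before "staking")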
func (pr *prefixRegistry) storeInfo(key string) (sst types.StoreType, ix int, err error) {
ix, has := binarySearch(pr.reserved, key)
if !has {
err = fmt.Errorf("prefix does not exist: %v", key)
return
}
sst, has = pr.StoreSchema[key]
if !has {
err = fmt.Errorf("prefix is registered but not in schema: %v", key)
}
return
}
func (pr *prefixRegistry) RegisterSubstore(key string, typ types.StoreType) error {
if !validSubStoreType(typ) {
return fmt.Errorf("StoreType not supported: %v", typ)
}
// Find the neighboring reserved prefix, and check for duplicates and conflicts
i, has := binarySearch(pr.reserved, key)
if has {
return fmt.Errorf("prefix already exists: %v", key)
}
if i > 0 && strings.HasPrefix(key, pr.reserved[i-1]) {
return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i-1], key)
}
if i < len(pr.reserved) && strings.HasPrefix(pr.reserved[i], key) {
return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i], key)
}
reserved := pr.reserved[:i]
reserved = append(reserved, key)
pr.reserved = append(reserved, pr.reserved[i:]...)
pr.StoreSchema[key] = typ
return nil
}
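// Illustrative sketch, not part of the original file: the prefix-conflict
// rules enforced by RegisterSubstore above, mirroring TestStoreConfig below.
//
//	pr.RegisterSubstore("store1", types.StoreTypePersistent)  // ok, reserves "store1"
//	pr.RegisterSubstore("store1b", types.StoreTypePersistent) // error: "store1" is a prefix of it
//	pr.RegisterSubstore("store", types.StoreTypePersistent)   // error: it is a prefix of "store1"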
func (tlm *traceListenMixin) AddListeners(skey types.StoreKey, listeners []types.WriteListener) {
key := skey.Name()
tlm.listeners[key] = append(tlm.listeners[key], listeners...)
}
// ListeningEnabled returns if listening is enabled for a specific KVStore
func (tlm *traceListenMixin) ListeningEnabled(key types.StoreKey) bool {
if ls, has := tlm.listeners[key.Name()]; has {
return len(ls) != 0
}
return false
}
func (tlm *traceListenMixin) TracingEnabled() bool {
return tlm.TraceWriter != nil
}
func (tlm *traceListenMixin) SetTracer(w io.Writer) {
tlm.TraceWriter = w
}
func (tlm *traceListenMixin) SetTraceContext(tc types.TraceContext) {
tlm.TraceContext = tc
}
func (tlm *traceListenMixin) wrapTraceListen(store types.KVStore, skey types.StoreKey) types.KVStore {
if tlm.TracingEnabled() {
store = tracekv.NewStore(store, tlm.TraceWriter, tlm.TraceContext)
}
if tlm.ListeningEnabled(skey) {
store = listenkv.NewStore(store, skey, tlm.listeners[skey.Name()])
}
return store
}
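// Illustrative sketch, not part of the original file: how tracing and
// listening are typically enabled before substores are handed out. Here s is
// a hypothetical *Store, skey a registered store key, w an io.Writer, and
// listener a types.WriteListener.
//
//	s.SetTracer(w)
//	s.SetTraceContext(types.TraceContext{"blockHeight": 64})
//	s.AddListeners(skey, []types.WriteListener{listener})
//	kv := s.GetKVStore(skey) // wrapped via wrapTraceListen (tracekv, then listenkv)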
func (s *Store) GetPruning() pruningtypes.PruningOptions { return s.Pruning }
func (s *Store) SetPruning(po pruningtypes.PruningOptions) { s.Pruning = po }


@ -1,986 +0,0 @@
package multi
import (
"bytes"
"math"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/cosmos/cosmos-sdk/codec"
codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
alohaData = map[string]string{
"hello": "goodbye",
"aloha": "shalom",
}
skey_1 = types.NewKVStoreKey("store1")
skey_2 = types.NewKVStoreKey("store2")
skey_3 = types.NewKVStoreKey("store3")
skey_4 = types.NewKVStoreKey("store4")
skey_1b = types.NewKVStoreKey("store1b")
skey_2b = types.NewKVStoreKey("store2b")
skey_3b = types.NewKVStoreKey("store3b")
)
func simpleStoreConfig(t *testing.T) StoreConfig {
opts := DefaultStoreConfig()
require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent))
return opts
}
func storeConfig123(t *testing.T) StoreConfig {
opts := DefaultStoreConfig()
opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent))
require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypePersistent))
require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypePersistent))
return opts
}
func newSubStoreWithData(t *testing.T, db dbm.Connection, storeData map[string]string) (*Store, types.KVStore) {
root, err := NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
store := root.GetKVStore(skey_1)
for k, v := range storeData {
store.Set([]byte(k), []byte(v))
}
return root, store
}
func TestGetSetHasDelete(t *testing.T) {
_, store := newSubStoreWithData(t, memdb.NewDB(), alohaData)
key := "hello"
exists := store.Has([]byte(key))
require.True(t, exists)
require.EqualValues(t, []byte(alohaData[key]), store.Get([]byte(key)))
value2 := "notgoodbye"
store.Set([]byte(key), []byte(value2))
require.EqualValues(t, value2, store.Get([]byte(key)))
store.Delete([]byte(key))
exists = store.Has([]byte(key))
require.False(t, exists)
require.Panics(t, func() { store.Get(nil) }, "Get(nil key) should panic")
require.Panics(t, func() { store.Get([]byte{}) }, "Get(empty key) should panic")
require.Panics(t, func() { store.Has(nil) }, "Has(nil key) should panic")
require.Panics(t, func() { store.Has([]byte{}) }, "Has(empty key) should panic")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "Set(nil key) should panic")
require.Panics(t, func() { store.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
require.Panics(t, func() { store.Set([]byte("key"), nil) }, "Set(nil value) should panic")
sub := store.(*substore)
sub.indexBucket = rwCrudFails{sub.indexBucket, nil}
require.Panics(t, func() {
store.Set([]byte("key"), []byte("value"))
}, "Set() when index fails should panic")
}
func TestConstructors(t *testing.T) {
db := memdb.NewDB()
store, err := NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
_ = store.GetKVStore(skey_1)
store.Commit()
require.NoError(t, store.Close())
t.Run("fail to load if InitialVersion > lowest existing version", func(t *testing.T) {
opts := StoreConfig{InitialVersion: 5, Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)}
store, err = NewStore(db, opts)
require.Error(t, err)
db.Close()
})
t.Run("can't load store when db.Versions fails", func(t *testing.T) {
store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreConfig())
require.Error(t, err)
store, err = NewStore(db, StoreConfig{StateCommitmentDB: dbVersionsFails{memdb.NewDB()}})
require.Error(t, err)
})
db = memdb.NewDB()
merkledb := memdb.NewDB()
w := db.Writer()
t.Run("can't use a DB with open writers", func(t *testing.T) {
store, err = NewStore(db, DefaultStoreConfig())
require.Error(t, err)
w.Discard()
w = merkledb.Writer()
store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb})
require.Error(t, err)
w.Discard()
})
t.Run("can't use DBs with different version history", func(t *testing.T) {
merkledb.SaveNextVersion()
store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb})
require.Error(t, err)
})
merkledb.Close()
t.Run("can't load existing store if we can't access root hash", func(t *testing.T) {
store, err = NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
store.Commit()
require.NoError(t, store.Close())
// ...whether because the root is missing
w = db.Writer()
s1RootKey := append(contentPrefix, substorePrefix(skey_1.Name())...)
s1RootKey = append(s1RootKey, merkleRootKey...)
w.Delete(s1RootKey)
w.Commit()
db.SaveNextVersion()
store, err = NewStore(db, DefaultStoreConfig())
require.Error(t, err)
// ...or, because of an error
store, err = NewStore(dbRWCrudFails{db}, DefaultStoreConfig())
require.Error(t, err)
})
}
func TestIterators(t *testing.T) {
_, store := newSubStoreWithData(t, memdb.NewDB(), map[string]string{
string([]byte{0x00}): "0",
string([]byte{0x00, 0x00}): "0 0",
string([]byte{0x00, 0x01}): "0 1",
string([]byte{0x00, 0x02}): "0 2",
string([]byte{0x01}): "1",
})
testCase := func(t *testing.T, iter types.Iterator, expected []string) {
var i int
for i = 0; iter.Valid(); iter.Next() {
expectedValue := expected[i]
value := iter.Value()
require.EqualValues(t, string(value), expectedValue)
i++
}
require.Equal(t, len(expected), i)
}
testCase(t, store.Iterator(nil, nil),
[]string{"0", "0 0", "0 1", "0 2", "1"})
testCase(t, store.Iterator([]byte{0}, nil),
[]string{"0", "0 0", "0 1", "0 2", "1"})
testCase(t, store.Iterator([]byte{0}, []byte{0, 1}),
[]string{"0", "0 0"})
testCase(t, store.Iterator([]byte{0}, []byte{1}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, store.Iterator([]byte{0, 1}, []byte{1}),
[]string{"0 1", "0 2"})
testCase(t, store.Iterator(nil, []byte{1}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, store.Iterator([]byte{0}, []byte{0}), []string{}) // start = end
testCase(t, store.Iterator([]byte{1}, []byte{0}), []string{}) // start > end
testCase(t, store.ReverseIterator(nil, nil),
[]string{"1", "0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, nil),
[]string{"1", "0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{0, 1}),
[]string{"0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{1}),
[]string{"0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0, 1}, []byte{1}),
[]string{"0 2", "0 1"})
testCase(t, store.ReverseIterator(nil, []byte{1}),
[]string{"0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{0}), []string{}) // start = end
testCase(t, store.ReverseIterator([]byte{1}, []byte{0}), []string{}) // start > end
testCase(t, types.KVStorePrefixIterator(store, []byte{0}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, types.KVStoreReversePrefixIterator(store, []byte{0}),
[]string{"0 2", "0 1", "0 0", "0"})
require.Panics(t, func() { store.Iterator([]byte{}, nil) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.Iterator(nil, []byte{}) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.ReverseIterator([]byte{}, nil) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.ReverseIterator(nil, []byte{}) }, "Iterator(empty key) should panic")
}
func TestCommit(t *testing.T) {
testBasic := func(opts StoreConfig) {
db := memdb.NewDB()
store, err := NewStore(db, opts)
require.NoError(t, err)
require.Zero(t, store.LastCommitID())
idNew := store.Commit()
// Adding one record changes the hash
s1 := store.GetKVStore(skey_1)
s1.Set([]byte{0}, []byte{0})
idOne := store.Commit()
require.Equal(t, idNew.Version+1, idOne.Version)
require.NotEqual(t, idNew.Hash, idOne.Hash)
// Hash of emptied store is same as new store
s1.Delete([]byte{0})
idEmptied := store.Commit()
require.Equal(t, idNew.Hash, idEmptied.Hash)
previd := idOne
for i := byte(1); i < 5; i++ {
s1.Set([]byte{i}, []byte{i})
id := store.Commit()
lastid := store.LastCommitID()
require.Equal(t, id.Hash, lastid.Hash)
require.Equal(t, id.Version, lastid.Version)
require.NotEqual(t, previd.Hash, id.Hash)
require.NotEqual(t, previd.Version, id.Version)
}
}
basicOpts := simpleStoreConfig(t)
basicOpts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
t.Run("sanity tests for Merkle hashing", func(t *testing.T) {
testBasic(basicOpts)
})
t.Run("sanity tests for Merkle hashing with separate DBs", func(t *testing.T) {
basicOpts.StateCommitmentDB = memdb.NewDB()
testBasic(basicOpts)
})
// test that we can recover from a failed commit
testFailedCommit := func(t *testing.T,
store *Store,
db dbm.Connection,
opts StoreConfig,
) {
if db == nil {
db = store.stateDB
}
s1 := store.GetKVStore(skey_1)
s1.Set([]byte{0}, []byte{0})
require.Panics(t, func() { store.Commit() })
require.NoError(t, store.Close())
// No version should be saved in the backing DB(s)
versions, _ := db.Versions()
require.Equal(t, 0, versions.Count())
if store.StateCommitmentDB != nil {
versions, _ = store.StateCommitmentDB.Versions()
require.Equal(t, 0, versions.Count())
}
// The store should now be reloaded successfully
store, err := NewStore(db, opts)
require.NoError(t, err)
s1 = store.GetKVStore(skey_1)
require.Nil(t, s1.Get([]byte{0}))
require.NoError(t, store.Close())
}
opts := simpleStoreConfig(t)
opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
// Ensure Store's commit is rolled back in each failure case...
t.Run("recover after failed Commit", func(t *testing.T) {
store, err := NewStore(dbRWCommitFails{memdb.NewDB()}, opts)
require.NoError(t, err)
testFailedCommit(t, store, nil, opts)
})
// If SaveVersion and Revert both fail during Store.Commit, the DB will contain
// committed data that belongs to no version: non-atomic behavior from the Store user's perspective.
// So, that data must be reverted when the store is reloaded.
t.Run("recover after failed SaveVersion and Revert", func(t *testing.T) {
var db dbm.Connection
db = dbSaveVersionFails{memdb.NewDB()}
// Revert should succeed in initial NewStore call, but fail during Commit
db = dbRevertFails{db, []bool{false, true}}
store, err := NewStore(db, opts)
require.NoError(t, err)
testFailedCommit(t, store, nil, opts)
})
// Repeat the above for StateCommitmentDB
t.Run("recover after failed StateCommitmentDB Commit", func(t *testing.T) {
opts.StateCommitmentDB = dbRWCommitFails{memdb.NewDB()}
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
testFailedCommit(t, store, nil, opts)
})
t.Run("recover after failed StateCommitmentDB SaveVersion and Revert", func(t *testing.T) {
var db dbm.Connection
db = dbSaveVersionFails{memdb.NewDB()}
db = dbRevertFails{db, []bool{false, true}}
opts.StateCommitmentDB = db
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
testFailedCommit(t, store, nil, opts)
})
opts = simpleStoreConfig(t)
t.Run("recover after stateDB.Versions error triggers failure", func(t *testing.T) {
db := memdb.NewDB()
store, err := NewStore(db, opts)
require.NoError(t, err)
store.stateDB = dbVersionsFails{store.stateDB}
testFailedCommit(t, store, db, opts)
})
t.Run("recover after stateTxn.Set error triggers failure", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
store.stateTxn = rwCrudFails{store.stateTxn, merkleRootKey}
testFailedCommit(t, store, nil, opts)
})
t.Run("stateDB.DeleteVersion error triggers failure", func(t *testing.T) {
opts.StateCommitmentDB = memdb.NewDB()
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
store.stateCommitmentTxn = rwCommitFails{store.stateCommitmentTxn}
store.stateDB = dbDeleteVersionFails{store.stateDB}
require.Panics(t, func() { store.Commit() })
})
t.Run("height overflow triggers failure", func(t *testing.T) {
opts.StateCommitmentDB = nil
opts.InitialVersion = math.MaxInt64
opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
require.Equal(t, int64(math.MaxInt64), store.Commit().Version)
require.Panics(t, func() { store.Commit() })
require.Equal(t, int64(math.MaxInt64), store.LastCommitID().Version) // version history not modified
})
t.Run("first commit version matches InitialVersion", func(t *testing.T) {
opts = simpleStoreConfig(t)
opts.InitialVersion = 5
opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)
opts.StateCommitmentDB = memdb.NewDB()
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
require.Equal(t, int64(5), store.Commit().Version)
})
// test improbable failures to fill out test coverage
opts = simpleStoreConfig(t)
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
store.Commit()
store.stateDB = dbVersionsFails{store.stateDB}
require.Panics(t, func() { store.LastCommitID() })
opts = simpleStoreConfig(t)
opts.StateCommitmentDB = memdb.NewDB()
store, err = NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
store.Commit()
store.stateTxn = rwCrudFails{store.stateTxn, nil}
require.Panics(t, func() { store.LastCommitID() })
}
func sliceToSet(slice []uint64) map[uint64]struct{} {
res := make(map[uint64]struct{})
for _, x := range slice {
res[x] = struct{}{}
}
return res
}
func TestPruning(t *testing.T) {
// Save versions up to 10 and verify pruning at final commit
testCases := []struct {
pruningtypes.PruningOptions
kept []uint64
}{
{pruningtypes.NewCustomPruningOptions(2, 10), []uint64{8, 9, 10}},
{pruningtypes.NewCustomPruningOptions(0, 10), []uint64{10}},
{pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []uint64{8, 9, 10}},
{pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
}
for tci, tc := range testCases {
dbs := []dbm.Connection{memdb.NewDB(), memdb.NewDB()}
opts := simpleStoreConfig(t)
opts.Pruning = tc.PruningOptions
opts.StateCommitmentDB = dbs[1]
store, err := NewStore(dbs[0], opts)
require.NoError(t, err)
s1 := store.GetKVStore(skey_1)
for i := byte(1); i <= 10; i++ {
s1.Set([]byte{i}, []byte{i})
cid := store.Commit()
latest := uint64(i)
require.Equal(t, latest, uint64(cid.Version))
}
for _, db := range dbs {
versions, err := db.Versions()
require.NoError(t, err)
kept := sliceToSet(tc.kept)
for v := uint64(1); v <= 10; v++ {
_, has := kept[v]
require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, tci)
}
}
}
// Test pruning interval
// Save up to 20th version while checking history at specific version checkpoints
testCheckPoints := map[uint64][]uint64{
5: {1, 2, 3, 4, 5},
10: {10},
15: {10, 11, 12, 13, 14, 15},
20: {20},
}
db := memdb.NewDB()
opts := simpleStoreConfig(t)
opts.Pruning = pruningtypes.NewCustomPruningOptions(0, 10)
store, err := NewStore(db, opts)
require.NoError(t, err)
for i := byte(1); i <= 20; i++ {
store.GetKVStore(skey_1).Set([]byte{i}, []byte{i})
cid := store.Commit()
latest := uint64(i)
require.Equal(t, latest, uint64(cid.Version))
kept, has := testCheckPoints[latest]
if !has {
continue
}
versions, err := db.Versions()
require.NoError(t, err)
keptMap := sliceToSet(kept)
for v := uint64(1); v <= latest; v++ {
_, has := keptMap[v]
require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, i)
}
}
}
func queryPath(skey types.StoreKey, endp string) string { return "/" + skey.Name() + endp }
func TestQuery(t *testing.T) {
k1, v1 := []byte("k1"), []byte("v1")
k2, v2 := []byte("k2"), []byte("v2")
v3 := []byte("v3")
ksub := []byte("k")
KVs0 := kv.Pairs{}
KVs1 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v1},
{Key: k2, Value: v2},
},
}
KVs2 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v3},
{Key: k2, Value: v2},
},
}
valExpSubEmpty, err := KVs0.Marshal()
require.NoError(t, err)
valExpSub1, err := KVs1.Marshal()
require.NoError(t, err)
valExpSub2, err := KVs2.Marshal()
require.NoError(t, err)
store, err := NewStore(memdb.NewDB(), simpleStoreConfig(t))
require.NoError(t, err)
cid := store.Commit()
ver := cid.Version
query := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: ver}
querySub := abci.RequestQuery{Path: queryPath(skey_1, "/subspace"), Data: ksub, Height: ver}
queryHeight0 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1}
// query subspace before anything set
qres := store.Query(querySub)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, valExpSubEmpty, qres.Value)
sub := store.GetKVStore(skey_1)
require.NotNil(t, sub)
// set data
sub.Set(k1, v1)
sub.Set(k2, v2)
t.Run("basic queries", func(t *testing.T) {
// set data without commit, doesn't show up
qres = store.Query(query)
require.True(t, qres.IsOK(), qres.Log)
require.Nil(t, qres.Value)
// commit it, but still don't see on old version
cid = store.Commit()
qres = store.Query(query)
require.True(t, qres.IsOK(), qres.Log)
require.Nil(t, qres.Value)
// but yes on the new version
query.Height = cid.Version
qres = store.Query(query)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v1, qres.Value)
// and for the subspace
querySub.Height = cid.Version
qres = store.Query(querySub)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, valExpSub1, qres.Value)
// modify
sub.Set(k1, v3)
cid = store.Commit()
// query will return old values, as height is fixed
qres = store.Query(query)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v1, qres.Value)
// update to latest height in the query and we are happy
query.Height = cid.Version
qres = store.Query(query)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v3, qres.Value)
// try other key
query2 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k2, Height: cid.Version}
qres = store.Query(query2)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v2, qres.Value)
// and for the subspace
querySub.Height = cid.Version
qres = store.Query(querySub)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, valExpSub2, qres.Value)
// default (height 0) will show latest-1
qres = store.Query(queryHeight0)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v1, qres.Value)
})
// querying an empty store will fail
store2, err := NewStore(memdb.NewDB(), simpleStoreConfig(t))
require.NoError(t, err)
qres = store2.Query(queryHeight0)
require.True(t, qres.IsErr())
// default shows latest, if latest-1 does not exist
store2.GetKVStore(skey_1).Set(k1, v1)
store2.Commit()
qres = store2.Query(queryHeight0)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v1, qres.Value)
store2.Close()
t.Run("failed queries", func(t *testing.T) {
// artificial error cases for coverage (should never happen with prescribed usage)
// ensure that height overflow triggers an error
require.NoError(t, err)
store2.stateDB = dbVersionsIs{store2.stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})}
qres = store2.Query(queryHeight0)
require.True(t, qres.IsErr())
// failure to access versions triggers an error
store2.stateDB = dbVersionsFails{store.stateDB}
qres = store2.Query(queryHeight0)
require.True(t, qres.IsErr())
store2.Close()
// query with a nil or empty key fails
badquery := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: []byte{}}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
badquery.Data = nil
qres = store.Query(badquery)
require.True(t, qres.IsErr())
// querying an invalid height will fail
badquery = abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: store.LastCommitID().Version + 1}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
// or an invalid path
badquery = abci.RequestQuery{Path: queryPath(skey_1, "/badpath"), Data: k1}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
})
t.Run("queries with proof", func(t *testing.T) {
// test that proofs are generated with single and separate DBs
testProve := func() {
queryProve0 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Prove: true}
qres = store.Query(queryProve0)
require.True(t, qres.IsOK(), qres.Log)
require.Equal(t, v1, qres.Value)
require.NotNil(t, qres.ProofOps)
}
testProve()
store.Close()
opts := simpleStoreConfig(t)
opts.StateCommitmentDB = memdb.NewDB()
store, err = NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
store.GetKVStore(skey_1).Set(k1, v1)
store.Commit()
testProve()
store.Close()
})
}
func TestStoreConfig(t *testing.T) {
opts := DefaultStoreConfig()
// Fail with invalid types
require.Error(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypeDB))
require.Error(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypeSMT))
// Ensure that no prefix conflicts are allowed
require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent))
require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
require.NoError(t, opts.RegisterSubstore(skey_3b.Name(), types.StoreTypeTransient))
require.Error(t, opts.RegisterSubstore(skey_1b.Name(), types.StoreTypePersistent))
require.Error(t, opts.RegisterSubstore(skey_2b.Name(), types.StoreTypePersistent))
require.Error(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypePersistent))
}
func TestMultiStoreBasic(t *testing.T) {
opts := DefaultStoreConfig()
err := opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)
require.NoError(t, err)
db := memdb.NewDB()
store, err := NewStore(db, opts)
require.NoError(t, err)
store_1 := store.GetKVStore(skey_1)
require.NotNil(t, store_1)
store_1.Set([]byte{0}, []byte{0})
val := store_1.Get([]byte{0})
require.Equal(t, []byte{0}, val)
store_1.Delete([]byte{0})
val = store_1.Get([]byte{0})
require.Equal(t, []byte(nil), val)
}
func TestGetVersion(t *testing.T) {
db := memdb.NewDB()
opts := storeConfig123(t)
store, err := NewStore(db, opts)
require.NoError(t, err)
cid := store.Commit()
view, err := store.GetVersion(cid.Version)
require.NoError(t, err)
subview := view.GetKVStore(skey_1)
require.NotNil(t, subview)
// version view should be read-only
require.Panics(t, func() { subview.Set([]byte{1}, []byte{1}) })
require.Panics(t, func() { subview.Delete([]byte{0}) })
// nonexistent version shouldn't be accessible
_, err = store.GetVersion(cid.Version + 1)
require.Equal(t, ErrVersionDoesNotExist, err)
substore := store.GetKVStore(skey_1)
require.NotNil(t, substore)
substore.Set([]byte{0}, []byte{0})
// setting a value shouldn't affect old version
require.False(t, subview.Has([]byte{0}))
cid = store.Commit()
view, err = store.GetVersion(cid.Version)
require.NoError(t, err)
subview = view.GetKVStore(skey_1)
require.NotNil(t, subview)
// deleting a value shouldn't affect old version
substore.Delete([]byte{0})
require.Equal(t, []byte{0}, subview.Get([]byte{0}))
}
func TestMultiStoreMigration(t *testing.T) {
db := memdb.NewDB()
opts := storeConfig123(t)
store, err := NewStore(db, opts)
require.NoError(t, err)
// write some data in all stores
k1, v1 := []byte("first"), []byte("store")
s1 := store.GetKVStore(skey_1)
require.NotNil(t, s1)
s1.Set(k1, v1)
k2, v2 := []byte("second"), []byte("restore")
s2 := store.GetKVStore(skey_2)
require.NotNil(t, s2)
s2.Set(k2, v2)
k3, v3 := []byte("third"), []byte("dropped")
s3 := store.GetKVStore(skey_3)
require.NotNil(t, s3)
s3.Set(k3, v3)
k4, v4 := []byte("fourth"), []byte("created")
require.Panics(t, func() { store.GetKVStore(skey_4) })
cid := store.Commit()
require.NoError(t, store.Close())
var migratedID types.CommitID
// Load without changes and make sure it is sensible
store, err = NewStore(db, opts)
require.NoError(t, err)
// let's query data to see it was saved properly
s2 = store.GetKVStore(skey_2)
require.NotNil(t, s2)
require.Equal(t, v2, s2.Get(k2))
require.NoError(t, store.Close())
t.Run("basic migration", func(t *testing.T) {
// now, let's load with upgrades...
opts.Upgrades = []types.StoreUpgrades{
{
Added: []string{skey_4.Name()},
Renamed: []types.StoreRename{{
OldKey: skey_2.Name(),
NewKey: skey_2b.Name(),
}},
Deleted: []string{skey_3.Name()},
},
}
store, err = NewStore(db, opts)
require.Nil(t, err)
// s1 was not changed
s1 = store.GetKVStore(skey_1)
require.NotNil(t, s1)
require.Equal(t, v1, s1.Get(k1))
// store2 is no longer valid
require.Panics(t, func() { store.GetKVStore(skey_2) })
// store2b has the old data
rs2 := store.GetKVStore(skey_2b)
require.NotNil(t, rs2)
require.Equal(t, v2, rs2.Get(k2))
// store3 is gone
require.Panics(t, func() { s3 = store.GetKVStore(skey_3) })
// store4 is valid
s4 := store.GetKVStore(skey_4)
require.NotNil(t, s4)
values := 0
it := s4.Iterator(nil, nil)
for ; it.Valid(); it.Next() {
values += 1
}
require.Zero(t, values)
require.NoError(t, it.Close())
// write something inside store4
s4.Set(k4, v4)
// store this migrated data, and load it again without migrations
migratedID = store.Commit()
require.Equal(t, migratedID.Version, int64(2))
require.NoError(t, store.Close())
})
t.Run("reload after migrations", func(t *testing.T) {
// fail to load the migrated store with the old schema
store, err = NewStore(db, storeConfig123(t))
require.Error(t, err)
// pass in a schema reflecting the migrations
migratedOpts := DefaultStoreConfig()
err = migratedOpts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)
require.NoError(t, err)
err = migratedOpts.RegisterSubstore(skey_2b.Name(), types.StoreTypePersistent)
require.NoError(t, err)
err = migratedOpts.RegisterSubstore(skey_4.Name(), types.StoreTypePersistent)
require.NoError(t, err)
store, err = NewStore(db, migratedOpts)
require.Nil(t, err)
require.Equal(t, migratedID, store.LastCommitID())
// query this new store
rl1 := store.GetKVStore(skey_1)
require.NotNil(t, rl1)
require.Equal(t, v1, rl1.Get(k1))
rl2 := store.GetKVStore(skey_2b)
require.NotNil(t, rl2)
require.Equal(t, v2, rl2.Get(k2))
rl4 := store.GetKVStore(skey_4)
require.NotNil(t, rl4)
require.Equal(t, v4, rl4.Get(k4))
})
t.Run("load view from before migrations", func(t *testing.T) {
// load and check a view of the store at first commit
view, err := store.GetVersion(cid.Version)
require.NoError(t, err)
s1 = view.GetKVStore(skey_1)
require.NotNil(t, s1)
require.Equal(t, v1, s1.Get(k1))
s2 = view.GetKVStore(skey_2)
require.NotNil(t, s2)
require.Equal(t, v2, s2.Get(k2))
s3 = view.GetKVStore(skey_3)
require.NotNil(t, s3)
require.Equal(t, v3, s3.Get(k3))
require.Panics(t, func() {
view.GetKVStore(skey_4)
})
})
}
func TestTrace(t *testing.T) {
key, value := []byte("test-key"), []byte("test-value")
tctx := types.TraceContext(map[string]interface{}{"blockHeight": 64})
expected_Set := "{\"operation\":\"write\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"
expected_Get := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"
expected_Get_missing := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
expected_Delete := "{\"operation\":\"delete\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
expected_IterKey := "{\"operation\":\"iterKey\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
expected_IterValue := "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"
db := memdb.NewDB()
opts := simpleStoreConfig(t)
require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient))
store, err := NewStore(db, opts)
require.NoError(t, err)
store.SetTraceContext(tctx)
require.False(t, store.TracingEnabled())
var buf bytes.Buffer
store.SetTracer(&buf)
require.True(t, store.TracingEnabled())
for _, skey := range []types.StoreKey{skey_1, skey_2, skey_3} {
buf.Reset()
store.GetKVStore(skey).Get(key)
require.Equal(t, expected_Get_missing, buf.String())
buf.Reset()
store.GetKVStore(skey).Set(key, value)
require.Equal(t, expected_Set, buf.String())
buf.Reset()
require.Equal(t, value, store.GetKVStore(skey).Get(key))
require.Equal(t, expected_Get, buf.String())
iter := store.GetKVStore(skey).Iterator(nil, nil)
buf.Reset()
require.Equal(t, key, iter.Key())
require.Equal(t, expected_IterKey, buf.String())
buf.Reset()
require.Equal(t, value, iter.Value())
require.Equal(t, expected_IterValue, buf.String())
require.NoError(t, iter.Close())
buf.Reset()
store.GetKVStore(skey).Delete(key)
require.Equal(t, expected_Delete, buf.String())
}
store.SetTracer(nil)
require.False(t, store.TracingEnabled())
require.NoError(t, store.Close())
}
func TestListeners(t *testing.T) {
kvPairs := []types.KVPair{
{Key: []byte{1}, Value: []byte("v1")},
{Key: []byte{2}, Value: []byte("v2")},
{Key: []byte{3}, Value: []byte("v3")},
}
testCases := []struct {
key []byte
value []byte
skey types.StoreKey
}{
{
key: kvPairs[0].Key,
value: kvPairs[0].Value,
skey: skey_1,
},
{
key: kvPairs[1].Key,
value: kvPairs[1].Value,
skey: skey_2,
},
{
key: kvPairs[2].Key,
value: kvPairs[2].Value,
skey: skey_3,
},
}
interfaceRegistry := codecTypes.NewInterfaceRegistry()
marshaller := codec.NewProtoCodec(interfaceRegistry)
db := memdb.NewDB()
opts := simpleStoreConfig(t)
require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient))
store, err := NewStore(db, opts)
require.NoError(t, err)
for i, tc := range testCases {
var buf bytes.Buffer
listener := types.NewStoreKVPairWriteListener(&buf, marshaller)
store.AddListeners(tc.skey, []types.WriteListener{listener})
require.True(t, store.ListeningEnabled(tc.skey))
// Set case
expected := types.StoreKVPair{
Key: tc.key,
Value: tc.value,
StoreKey: tc.skey.Name(),
Delete: false,
}
var kvpair types.StoreKVPair
buf.Reset()
store.GetKVStore(tc.skey).Set(tc.key, tc.value)
require.NoError(t, marshaller.UnmarshalLengthPrefixed(buf.Bytes(), &kvpair))
require.Equal(t, expected, kvpair, i)
// Delete case
expected = types.StoreKVPair{
Key: tc.key,
Value: nil,
StoreKey: tc.skey.Name(),
Delete: true,
}
kvpair = types.StoreKVPair{}
buf.Reset()
store.GetKVStore(tc.skey).Delete(tc.key)
require.NoError(t, marshaller.UnmarshalLengthPrefixed(buf.Bytes(), &kvpair))
require.Equal(t, expected, kvpair, i)
}
require.NoError(t, store.Close())
}


@ -1,117 +0,0 @@
package multi
import (
"crypto/sha256"
"io"
"sync"
dbm "github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
// Get implements KVStore.
func (s *substore) Get(key []byte) []byte {
s.root.mtx.RLock()
defer s.root.mtx.RUnlock()
val, err := s.dataBucket.Get(key)
if err != nil {
panic(err)
}
return val
}
// Has implements KVStore.
func (s *substore) Has(key []byte) bool {
s.root.mtx.RLock()
defer s.root.mtx.RUnlock()
has, err := s.dataBucket.Has(key)
if err != nil {
panic(err)
}
return has
}
// Set implements KVStore.
func (s *substore) Set(key, value []byte) {
s.root.mtx.Lock()
defer s.root.mtx.Unlock()
err := s.dataBucket.Set(key, value)
if err != nil {
panic(err)
}
s.stateCommitmentStore.Set(key, value)
khash := sha256.Sum256(key)
err = s.indexBucket.Set(khash[:], key)
if err != nil {
panic(err)
}
}
// Delete implements KVStore.
func (s *substore) Delete(key []byte) {
khash := sha256.Sum256(key)
s.root.mtx.Lock()
defer s.root.mtx.Unlock()
s.stateCommitmentStore.Delete(key)
_ = s.indexBucket.Delete(khash[:])
_ = s.dataBucket.Delete(key)
}
type contentsIterator struct {
types.Iterator
locker sync.Locker
}
func (s *substore) newSubstoreIterator(source dbm.Iterator) *contentsIterator {
locker := s.root.mtx.RLocker()
locker.Lock()
return &contentsIterator{dbutil.ToStoreIterator(source), locker}
}
func (it *contentsIterator) Close() error {
defer it.locker.Unlock()
return it.Iterator.Close()
}
// Iterator implements KVStore.
func (s *substore) Iterator(start, end []byte) types.Iterator {
iter, err := s.dataBucket.Iterator(start, end)
if err != nil {
panic(err)
}
return s.newSubstoreIterator(iter)
}
// ReverseIterator implements KVStore.
func (s *substore) ReverseIterator(start, end []byte) types.Iterator {
iter, err := s.dataBucket.ReverseIterator(start, end)
if err != nil {
panic(err)
}
return s.newSubstoreIterator(iter)
}
// GetStoreType implements Store.
func (s *substore) GetStoreType() types.StoreType {
return types.StoreTypePersistent
}
func (s *substore) CacheWrap() types.CacheWrap {
return cachekv.NewStore(s)
}
func (s *substore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
func (s *substore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}


@ -1,89 +0,0 @@
//nolint:unused
package multi
import (
"bytes"
"errors"
dbm "github.com/cosmos/cosmos-sdk/db"
)
type (
dbDeleteVersionFails struct{ dbm.Connection }
dbRWCommitFails struct{ dbm.Connection }
dbRWCrudFails struct{ dbm.Connection }
dbSaveVersionFails struct{ dbm.Connection }
dbRevertFails struct {
dbm.Connection
// order of calls to fail on (e.g. [true, false] => first call fails; second succeeds)
failOn []bool
}
)
type dbVersionsIs struct {
dbm.Connection
vset dbm.VersionSet
}
type (
dbVersionsFails struct{ dbm.Connection }
rwCommitFails struct{ dbm.ReadWriter }
rwCrudFails struct {
dbm.ReadWriter
onKey []byte
}
)
func (dbVersionsFails) Versions() (dbm.VersionSet, error) { return nil, errors.New("dbVersionsFails") }
func (db dbVersionsIs) Versions() (dbm.VersionSet, error) { return db.vset, nil }
func (db dbRWCrudFails) ReadWriter() dbm.ReadWriter {
return rwCrudFails{db.Connection.ReadWriter(), nil}
}
func (dbSaveVersionFails) SaveVersion(uint64) error { return errors.New("dbSaveVersionFails") }
func (db dbRevertFails) Revert() error {
fail := false
if len(db.failOn) > 0 {
fail, db.failOn = db.failOn[0], db.failOn[1:] //nolint:staticcheck
}
if fail {
return errors.New("dbRevertFails")
}
return db.Connection.Revert()
}
func (dbDeleteVersionFails) DeleteVersion(uint64) error { return errors.New("dbDeleteVersionFails") }
func (tx rwCommitFails) Commit() error {
tx.Discard()
return errors.New("rwCommitFails")
}
func (db dbRWCommitFails) ReadWriter() dbm.ReadWriter {
return rwCommitFails{db.Connection.ReadWriter()}
}
func (rw rwCrudFails) Get(k []byte) ([]byte, error) {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return nil, errors.New("rwCrudFails.Get")
}
return rw.ReadWriter.Get(k)
}
func (rw rwCrudFails) Has(k []byte) (bool, error) {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return false, errors.New("rwCrudFails.Has")
}
return rw.ReadWriter.Has(k)
}
func (rw rwCrudFails) Set(k []byte, v []byte) error {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return errors.New("rwCrudFails.Set")
}
return rw.ReadWriter.Set(k, v)
}
func (rw rwCrudFails) Delete(k []byte) error {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return errors.New("rwCrudFails.Delete")
}
return rw.ReadWriter.Delete(k)
}


@ -1,178 +0,0 @@
package multi
import (
"errors"
"io"
dbm "github.com/cosmos/cosmos-sdk/db"
prefixdb "github.com/cosmos/cosmos-sdk/db/prefix"
util "github.com/cosmos/cosmos-sdk/internal"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
)
var ErrReadOnly = errors.New("cannot modify read-only store")
func (s *viewSubstore) GetStateCommitmentStore() *smt.Store {
return s.stateCommitmentStore
}
// Get implements KVStore.
func (s *viewSubstore) Get(key []byte) []byte {
val, err := s.dataBucket.Get(key)
if err != nil {
panic(err)
}
return val
}
// Has implements KVStore.
func (s *viewSubstore) Has(key []byte) bool {
has, err := s.dataBucket.Has(key)
if err != nil {
panic(err)
}
return has
}
// Set implements KVStore.
func (s *viewSubstore) Set(key []byte, value []byte) {
panic(ErrReadOnly)
}
// Delete implements KVStore.
func (s *viewSubstore) Delete(key []byte) {
panic(ErrReadOnly)
}
// Iterator implements KVStore.
func (s *viewSubstore) Iterator(start, end []byte) types.Iterator {
iter, err := s.dataBucket.Iterator(start, end)
if err != nil {
panic(err)
}
return dbutil.ToStoreIterator(iter)
}
// ReverseIterator implements KVStore.
func (s *viewSubstore) ReverseIterator(start, end []byte) types.Iterator {
iter, err := s.dataBucket.ReverseIterator(start, end)
if err != nil {
panic(err)
}
return dbutil.ToStoreIterator(iter)
}
// GetStoreType implements Store.
func (s *viewSubstore) GetStoreType() types.StoreType {
return types.StoreTypePersistent
}
func (s *viewSubstore) CacheWrap() types.CacheWrap {
return cachekv.NewStore(s)
}
func (s *viewSubstore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
func (s *viewSubstore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}
func (s *viewStore) getMerkleRoots() (ret map[string][]byte, err error) {
ret = map[string][]byte{}
for key := range s.schema {
sub, has := s.substoreCache[key]
if !has {
sub, err = s.getSubstore(key)
if err != nil {
return
}
}
ret[key] = sub.stateCommitmentStore.Root()
}
return
}
func (store *Store) getView(version int64) (ret *viewStore, err error) {
stateView, err := store.stateDB.ReaderAt(uint64(version))
if err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, stateView.Discard(), "stateView.Discard also failed")
}
}()
stateCommitmentView := stateView
if store.StateCommitmentDB != nil {
stateCommitmentView, err = store.StateCommitmentDB.ReaderAt(uint64(version))
if err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, stateCommitmentView.Discard(), "stateCommitmentView.Discard also failed")
}
}()
}
// Now read this version's schema
schemaView := prefixdb.NewReader(stateView, schemaPrefix)
defer func() {
if err != nil {
err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
}
}()
pr, err := readSavedSchema(schemaView)
if err != nil {
return
}
// The migrated contents and schema are not committed until the next store.Commit
ret = &viewStore{
stateView: stateView,
stateCommitmentView: stateCommitmentView,
substoreCache: map[string]*viewSubstore{},
schema: pr.StoreSchema,
}
return ret, err
}
func (s *viewStore) GetKVStore(skey types.StoreKey) types.KVStore {
key := skey.Name()
if _, has := s.schema[key]; !has {
panic(ErrStoreNotFound(key))
}
ret, err := s.getSubstore(key)
if err != nil {
panic(err)
}
s.substoreCache[key] = ret
return ret
}
// Reads but does not update substore cache
func (s *viewStore) getSubstore(key string) (*viewSubstore, error) {
if cached, has := s.substoreCache[key]; has {
return cached, nil
}
pfx := substorePrefix(key)
stateR := prefixdb.NewReader(s.stateView, pfx)
stateCommitmentR := prefixdb.NewReader(s.stateCommitmentView, pfx)
rootHash, err := stateR.Get(merkleRootKey)
if err != nil {
return nil, err
}
return &viewSubstore{
root: s,
name: key,
dataBucket: prefixdb.NewReader(stateR, dataPrefix),
indexBucket: prefixdb.NewReader(stateR, indexPrefix),
stateCommitmentStore: loadSMT(dbm.ReaderAsReadWriter(stateCommitmentR), rootHash),
}, nil
}


@ -1,124 +0,0 @@
// Here we implement proof generation according to the ICS-23 specification:
// https://github.com/cosmos/ibc/tree/master/spec/core/ics-023-vector-commitments
package smt
import (
"crypto/sha256"
"fmt"
dbm "github.com/cosmos/cosmos-sdk/db"
ics23 "github.com/confio/ics23/go"
)
func createIcs23Proof(store *Store, key []byte) (*ics23.CommitmentProof, error) {
ret := &ics23.CommitmentProof{}
path := sha256.Sum256(key)
has, err := store.tree.Has(key)
if err != nil {
return nil, err
}
if has { // Membership proof
value, err := store.values.Get(path[:])
if err != nil {
return nil, err
}
if value == nil {
return nil, fmt.Errorf("value not found for: %v", key)
}
proof, err := store.tree.Prove(key)
if err != nil {
return nil, err
}
ret.Proof = &ics23.CommitmentProof_Exist{Exist: &ics23.ExistenceProof{
Key: path[:],
Value: value,
Leaf: ics23.SmtSpec.LeafSpec,
Path: convertInnerOps(path[:], proof.SideNodes),
}}
} else { // Non-membership
nonexist, err := toNonExistenceProof(store, path)
if err != nil {
return nil, err
}
ret.Proof = &ics23.CommitmentProof_Nonexist{Nonexist: nonexist}
}
return ret, nil
}
func toNonExistenceProof(store *Store, path [32]byte) (*ics23.NonExistenceProof, error) {
// Seek to our neighbors via the backing DB
getNext := func(it dbm.Iterator) (*ics23.ExistenceProof, error) {
defer it.Close()
if it.Next() {
value, err := store.values.Get(it.Key())
if err != nil {
return nil, err
}
if value == nil {
return nil, fmt.Errorf("value not found for: %v", it.Value())
}
proof, err := store.tree.Prove(it.Value()) // pass the preimage to Prove
if err != nil {
return nil, err
}
return &ics23.ExistenceProof{
Key: it.Key(),
Value: value,
Leaf: ics23.SmtSpec.LeafSpec,
Path: convertInnerOps(it.Key(), proof.SideNodes),
}, nil
}
return nil, nil
}
var lproof, rproof *ics23.ExistenceProof
it, err := store.preimages.ReverseIterator(nil, path[:])
if err != nil {
return nil, err
}
lproof, err = getNext(it)
if err != nil {
return nil, err
}
it, err = store.preimages.Iterator(path[:], nil)
if err != nil {
return nil, err
}
rproof, err = getNext(it)
if err != nil {
return nil, err
}
return &ics23.NonExistenceProof{
Key: path[:],
Left: lproof,
Right: rproof,
}, nil
}
func convertInnerOps(path []byte, sideNodes [][]byte) []*ics23.InnerOp {
depth := len(sideNodes)
inners := make([]*ics23.InnerOp, 0, depth)
for i := 0; i < len(sideNodes); i++ {
op := &ics23.InnerOp{
Hash: ics23.HashOp_SHA256,
Prefix: []byte{1},
}
if getBitAtFromMSB(path, depth-1-i) == 1 {
// right child is on path
op.Prefix = append(op.Prefix, sideNodes[i]...)
} else {
op.Suffix = sideNodes[i]
}
inners = append(inners, op)
}
return inners
}
// getBitAtFromMSB gets the bit at an offset from the most significant bit
func getBitAtFromMSB(data []byte, position int) int {
if int(data[position/8])&(1<<(8-1-uint(position)%8)) > 0 {
return 1
}
return 0
}
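// Illustrative sketch, not part of the original file: getBitAtFromMSB walks
// the hashed key path from the most significant bit downward. For a single
// byte 0xB4 (binary 1011_0100):
//
//	getBitAtFromMSB([]byte{0xB4}, 0) == 1 // most significant bit
//	getBitAtFromMSB([]byte{0xB4}, 1) == 0
//	getBitAtFromMSB([]byte{0xB4}, 2) == 1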


@ -1,108 +0,0 @@
package smt_test
import (
"crypto/sha256"
"testing"
ics23 "github.com/confio/ics23/go"
"github.com/stretchr/testify/assert"
"github.com/cosmos/cosmos-sdk/db/memdb"
store "github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
)
func TestProofICS23(t *testing.T) {
txn := memdb.NewDB().ReadWriter()
s := store.NewStore(txn)
// pick keys whose hashes begin with different bits
key00 := []byte("foo") // 00101100 = sha256(foo)[0]
key01 := []byte("bill") // 01100010
key10 := []byte("baz") // 10111010
key11 := []byte("bar") // 11111100
path00 := sha256.Sum256(key00)
path01 := sha256.Sum256(key01)
path10 := sha256.Sum256(key10)
val1 := []byte("0")
val2 := []byte("1")
s.Set(key01, val1)
// Membership
proof, err := s.GetProofICS23(key01)
assert.NoError(t, err)
nonexist := proof.GetNonexist()
assert.Nil(t, nonexist)
exist := proof.GetExist()
assert.NotNil(t, exist)
assert.Equal(t, 0, len(exist.Path))
assert.NoError(t, exist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1))
// Non-membership
proof, err = s.GetProofICS23(key00) // When leaf is leftmost node
assert.NoError(t, err)
nonexist = proof.GetNonexist()
assert.NotNil(t, nonexist)
assert.Nil(t, nonexist.Left)
assert.Equal(t, path00[:], nonexist.Key)
assert.NotNil(t, nonexist.Right)
assert.Equal(t, 0, len(nonexist.Right.Path))
assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path00[:]))
proof, err = s.GetProofICS23(key10) // When rightmost
assert.NoError(t, err)
nonexist = proof.GetNonexist()
assert.NotNil(t, nonexist)
assert.NotNil(t, nonexist.Left)
assert.Equal(t, 0, len(nonexist.Left.Path))
assert.Nil(t, nonexist.Right)
assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:]))
badNonexist := nonexist
s.Set(key11, val2)
proof, err = s.GetProofICS23(key10) // In between two keys
assert.NoError(t, err)
nonexist = proof.GetNonexist()
assert.NotNil(t, nonexist)
assert.Equal(t, path10[:], nonexist.Key)
assert.NotNil(t, nonexist.Left)
assert.Equal(t, 1, len(nonexist.Left.Path))
assert.NotNil(t, nonexist.Right)
assert.Equal(t, 1, len(nonexist.Right.Path))
assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:]))
// Make sure proofs work with a loaded store
root := s.Root()
s = store.LoadStore(txn, root)
proof, err = s.GetProofICS23(key10)
assert.NoError(t, err)
nonexist = proof.GetNonexist()
assert.Equal(t, path10[:], nonexist.Key)
assert.NotNil(t, nonexist.Left)
assert.Equal(t, 1, len(nonexist.Left.Path))
assert.NotNil(t, nonexist.Right)
assert.Equal(t, 1, len(nonexist.Right.Path))
assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:]))
// Invalid proofs should fail to verify
badExist := exist // expired proof
assert.Error(t, badExist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1))
badExist = nonexist.Left
badExist.Key = key01 // .Key must contain key path
assert.Error(t, badExist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1))
badExist = nonexist.Left
badExist.Path[0].Prefix = []byte{0} // wrong inner node prefix
assert.Error(t, badExist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1))
badExist = nonexist.Left
badExist.Path = []*ics23.InnerOp{} // empty path
assert.Error(t, badExist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1))
assert.Error(t, badNonexist.Verify(ics23.SmtSpec, s.Root(), path10[:]))
badNonexist = nonexist
badNonexist.Key = key10
assert.Error(t, badNonexist.Verify(ics23.SmtSpec, s.Root(), path10[:]))
}


@ -1,93 +0,0 @@
package smt
import (
"bytes"
"crypto/sha256"
"encoding/gob"
"hash"
"github.com/cosmos/cosmos-sdk/store/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/lazyledger/smt"
"github.com/tendermint/tendermint/crypto/merkle"
tmmerkle "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
type HasherType byte
const (
SHA256 HasherType = iota
)
const (
ProofType = "smt"
)
type ProofOp struct {
Root []byte
Key []byte
Hasher HasherType
Proof smt.SparseMerkleProof
}
var _ merkle.ProofOperator = (*ProofOp)(nil)
// NewProofOp returns a ProofOp for a SparseMerkleProof.
func NewProofOp(root, key []byte, hasher HasherType, proof smt.SparseMerkleProof) *ProofOp {
return &ProofOp{
Root: root,
Key: key,
Hasher: hasher,
Proof: proof,
}
}
func (p *ProofOp) Run(args [][]byte) ([][]byte, error) {
switch len(args) {
case 0: // non-membership proof
if !smt.VerifyProof(p.Proof, p.Root, p.Key, []byte{}, getHasher(p.Hasher)) {
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "proof did not verify absence of key: %s", p.Key)
}
case 1: // membership proof
if !smt.VerifyProof(p.Proof, p.Root, p.Key, args[0], getHasher(p.Hasher)) {
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "proof did not verify existence of key %s with given value %x", p.Key, args[0])
}
default:
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args))
}
return [][]byte{p.Root}, nil
}
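// Illustrative sketch, not part of the original file: Run's argument
// convention, as exercised by TestProofOpInterface below. Here op is a
// hypothetical *ProofOp.
//
//	roots, err := op.Run([][]byte{value}) // membership: verify key -> value
//	roots, err = op.Run([][]byte{})       // non-membership: verify absence
//	// both return [][]byte{op.Root} on success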
func (p *ProofOp) GetKey() []byte {
return p.Key
}
func (p *ProofOp) ProofOp() tmmerkle.ProofOp {
var data bytes.Buffer
enc := gob.NewEncoder(&data)
enc.Encode(p)
return tmmerkle.ProofOp{
Type: "smt",
Key: p.Key,
Data: data.Bytes(),
}
}
func ProofDecoder(pop tmmerkle.ProofOp) (merkle.ProofOperator, error) {
dec := gob.NewDecoder(bytes.NewBuffer(pop.Data))
var proof ProofOp
err := dec.Decode(&proof)
if err != nil {
return nil, err
}
return &proof, nil
}
func getHasher(hasher HasherType) hash.Hash {
switch hasher {
case SHA256:
return sha256.New()
default:
return nil
}
}


@ -1,69 +0,0 @@
package smt_test
import (
"crypto/sha256"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db/memdb"
smtstore "github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
"github.com/lazyledger/smt"
)
func TestProofOpInterface(t *testing.T) {
hasher := sha256.New()
nodes, values := memdb.NewDB(), memdb.NewDB()
tree := smt.NewSparseMerkleTree(nodes.ReadWriter(), values.ReadWriter(), hasher)
key := []byte("foo")
value := []byte("bar")
root, err := tree.Update(key, value)
require.NoError(t, err)
require.NotEmpty(t, root)
proof, err := tree.Prove(key)
require.True(t, smt.VerifyProof(proof, root, key, value, hasher))
storeProofOp := smtstore.NewProofOp(root, key, smtstore.SHA256, proof)
require.NotNil(t, storeProofOp)
// inclusion proof
r, err := storeProofOp.Run([][]byte{value})
assert.NoError(t, err)
assert.NotEmpty(t, r)
assert.Equal(t, root, r[0])
// inclusion proof - wrong value - should fail
r, err = storeProofOp.Run([][]byte{key})
assert.Error(t, err)
assert.Empty(t, r)
// exclusion proof - should fail
r, err = storeProofOp.Run([][]byte{})
assert.Error(t, err)
assert.Empty(t, r)
// invalid request - should fail
r, err = storeProofOp.Run([][]byte{key, key})
assert.Error(t, err)
assert.Empty(t, r)
// encode
tmProofOp := storeProofOp.ProofOp()
assert.NotNil(t, tmProofOp)
assert.Equal(t, smtstore.ProofType, tmProofOp.Type)
assert.Equal(t, key, tmProofOp.Key, key)
assert.NotEmpty(t, tmProofOp.Data)
// decode
decoded, err := smtstore.ProofDecoder(tmProofOp)
assert.NoError(t, err)
assert.NotNil(t, decoded)
assert.Equal(t, key, decoded.GetKey())
// run proof after decoding
r, err = decoded.Run([][]byte{value})
assert.NoError(t, err)
assert.NotEmpty(t, r)
assert.Equal(t, root, r[0])
}


@ -1,143 +0,0 @@
package smt
import (
"crypto/sha256"
"errors"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/prefix"
"github.com/cosmos/cosmos-sdk/store/types"
ics23 "github.com/confio/ics23/go"
"github.com/lazyledger/smt"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
var (
_ types.BasicKVStore = (*Store)(nil)
_ smt.MapStore = (dbMapStore{})
)
var (
nodesPrefix = []byte{0}
valuesPrefix = []byte{1}
preimagesPrefix = []byte{2}
errKeyEmpty = errors.New("key is empty or nil")
errValueNil = errors.New("value is nil")
)
// Store Implements types.KVStore and CommitKVStore.
type Store struct {
tree *smt.SparseMerkleTree
values dbm.ReadWriter
// Map hashed keys back to preimage
preimages dbm.ReadWriter
}
// An smt.MapStore that wraps Get to raise smt.InvalidKeyError;
// smt.SparseMerkleTree expects this error to be returned when a key is not found
type dbMapStore struct{ dbm.ReadWriter }
func NewStore(db dbm.ReadWriter) *Store {
nodes := prefix.NewReadWriter(db, nodesPrefix)
values := prefix.NewReadWriter(db, valuesPrefix)
preimages := prefix.NewReadWriter(db, preimagesPrefix)
return &Store{
tree: smt.NewSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New()),
values: values,
preimages: preimages,
}
}
func LoadStore(db dbm.ReadWriter, root []byte) *Store {
nodes := prefix.NewReadWriter(db, nodesPrefix)
values := prefix.NewReadWriter(db, valuesPrefix)
preimages := prefix.NewReadWriter(db, preimagesPrefix)
return &Store{
tree: smt.ImportSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New(), root),
values: values,
preimages: preimages,
}
}
func (s *Store) GetProof(key []byte) (*tmcrypto.ProofOps, error) {
if len(key) == 0 {
return nil, errKeyEmpty
}
proof, err := s.tree.Prove(key)
if err != nil {
return nil, err
}
op := NewProofOp(s.tree.Root(), key, SHA256, proof)
return &tmcrypto.ProofOps{Ops: []tmcrypto.ProofOp{op.ProofOp()}}, nil
}
func (s *Store) GetProofICS23(key []byte) (*ics23.CommitmentProof, error) {
return createIcs23Proof(s, key)
}
func (s *Store) Root() []byte { return s.tree.Root() }
// BasicKVStore interface below:
// Get returns nil iff key doesn't exist. Panics on nil or empty key.
func (s *Store) Get(key []byte) []byte {
if len(key) == 0 {
panic(errKeyEmpty)
}
val, err := s.tree.Get(key)
if err != nil {
panic(err)
}
return val
}
// Has checks if a key exists. Panics on nil or empty key.
func (s *Store) Has(key []byte) bool {
if len(key) == 0 {
panic(errKeyEmpty)
}
has, err := s.tree.Has(key)
if err != nil {
panic(err)
}
return has
}
// Set sets the key. Panics on nil key or value.
func (s *Store) Set(key []byte, value []byte) {
if len(key) == 0 {
panic(errKeyEmpty)
}
if value == nil {
panic(errValueNil)
}
_, err := s.tree.Update(key, value)
if err != nil {
panic(err)
}
path := sha256.Sum256(key)
s.preimages.Set(path[:], key)
}
// Delete deletes the key. Panics on nil key.
func (s *Store) Delete(key []byte) {
if len(key) == 0 {
panic(errKeyEmpty)
}
_, _ = s.tree.Delete(key)
path := sha256.Sum256(key)
s.preimages.Delete(path[:])
}
func (ms dbMapStore) Get(key []byte) ([]byte, error) {
val, err := ms.ReadWriter.Get(key)
if err != nil {
return nil, err
}
if val == nil {
return nil, &smt.InvalidKeyError{Key: key}
}
return val, nil
}
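// Illustrative sketch, not part of the original file: round-tripping data
// through the SMT store and reloading it from a committed root, mirroring
// TestLoadStore below.
//
//	txn := memdb.NewDB().ReadWriter()
//	s := NewStore(txn)
//	s.Set([]byte("k"), []byte("v"))
//	root := s.Root()
//	s = LoadStore(txn, root)
//	_ = s.Get([]byte("k")) // []byte("v")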


@ -1,46 +0,0 @@
package smt_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/cosmos/cosmos-sdk/db/memdb"
store "github.com/cosmos/cosmos-sdk/store/v2alpha1/smt"
)
func TestGetSetHasDelete(t *testing.T) {
db := memdb.NewDB()
s := store.NewStore(db.ReadWriter())
s.Set([]byte("foo"), []byte("bar"))
assert.Equal(t, []byte("bar"), s.Get([]byte("foo")))
assert.Equal(t, true, s.Has([]byte("foo")))
s.Delete([]byte("foo"))
assert.Equal(t, false, s.Has([]byte("foo")))
assert.Panics(t, func() { s.Get(nil) }, "Get(nil key) should panic")
assert.Panics(t, func() { s.Get([]byte{}) }, "Get(empty key) should panic")
assert.Panics(t, func() { s.Has(nil) }, "Has(nil key) should panic")
assert.Panics(t, func() { s.Has([]byte{}) }, "Has(empty key) should panic")
assert.Panics(t, func() { s.Set(nil, []byte("value")) }, "Set(nil key) should panic")
assert.Panics(t, func() { s.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
assert.Panics(t, func() { s.Set([]byte("key"), nil) }, "Set(nil value) should panic")
}
func TestLoadStore(t *testing.T) {
db := memdb.NewDB()
txn := db.ReadWriter()
s := store.NewStore(txn)
s.Set([]byte{0}, []byte{0})
s.Set([]byte{1}, []byte{1})
s.Delete([]byte{1})
root := s.Root()
s = store.LoadStore(txn, root)
assert.Equal(t, []byte{0}, s.Get([]byte{0}))
assert.False(t, s.Has([]byte{1}))
s.Set([]byte{2}, []byte{2})
assert.NotEqual(t, root, s.Root())
}
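// TestGetProofSketch is an editor's sketch, not part of the original test
// file: it exercises the proof path the same way, using only identifiers
// already imported above.
func TestGetProofSketch(t *testing.T) {
	db := memdb.NewDB()
	s := store.NewStore(db.ReadWriter())
	s.Set([]byte("foo"), []byte("bar"))
	proof, err := s.GetProof([]byte("foo")) // proof for "foo" under s.Root()
	assert.NoError(t, err)
	assert.NotNil(t, proof)
}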


@ -1,47 +0,0 @@
package transient
import (
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/dbadapter"
)
var (
_ types.KVStore = (*Store)(nil)
_ types.Committer = (*Store)(nil)
)
// Store is a wrapper for a memory store which does not persist data.
type Store struct {
dbadapter.Store
conn dbm.Connection
}
// NewStore constructs a new transient store.
func NewStore() *Store {
db := memdb.NewDB()
return &Store{
Store: dbadapter.Store{DB: db.ReadWriter()},
conn: db,
}
}
// GetStoreType implements types.KVStore.
func (ts *Store) GetStoreType() types.StoreType {
return types.StoreTypeTransient
}
// Commit implements types.Committer. It discards all writes and resets the
// store to a fresh in-memory state; transient data never persists across
// commits, so the returned CommitID is always zero-valued.
func (ts *Store) Commit() (id types.CommitID) {
ts.DB.Discard()
ts.Store = dbadapter.Store{DB: ts.conn.ReadWriter()}
return
}
func (ts *Store) SetPruning(pruningtypes.PruningOptions) {}
func (ts *Store) GetPruning() pruningtypes.PruningOptions { return pruningtypes.PruningOptions{} }
func (ts *Store) LastCommitID() (id types.CommitID) { return }


@ -1,27 +0,0 @@
package transient_test
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2alpha1/transient"
)
var k, v = []byte("hello"), []byte("world")
func TestTransientStore(t *testing.T) {
tstore := transient.NewStore()
require.Nil(t, tstore.Get(k))
tstore.Set(k, v)
require.Equal(t, v, tstore.Get(k))
tstore.Commit()
require.Nil(t, tstore.Get(k))
emptyCommitID := tstore.LastCommitID()
require.Equal(t, int64(0), emptyCommitID.Version)
require.True(t, bytes.Equal(emptyCommitID.Hash, nil))
require.Equal(t, types.StoreTypeTransient, tstore.GetStoreType())
}


@ -1,111 +0,0 @@
package types
import (
"io"
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
v1 "github.com/cosmos/cosmos-sdk/store/types"
)
// Re-export relevant original store types
type (
StoreKey = v1.StoreKey
StoreType = v1.StoreType
CommitID = v1.CommitID
StoreUpgrades = v1.StoreUpgrades
StoreRename = v1.StoreRename
Iterator = v1.Iterator
TraceContext = v1.TraceContext
WriteListener = v1.WriteListener
BasicKVStore = v1.BasicKVStore
KVStore = v1.KVStore
Committer = v1.Committer
CommitKVStore = v1.CommitKVStore
CacheKVStore = v1.CacheKVStore
Queryable = v1.Queryable
CacheWrap = v1.CacheWrap
KVStoreKey = v1.KVStoreKey
MemoryStoreKey = v1.MemoryStoreKey
TransientStoreKey = v1.TransientStoreKey
KVPair = v1.KVPair
StoreKVPair = v1.StoreKVPair
)
// Re-export relevant constants, values and utility functions
const (
StoreTypeMemory = v1.StoreTypeMemory
StoreTypeTransient = v1.StoreTypeTransient
StoreTypeDB = v1.StoreTypeDB
StoreTypeSMT = v1.StoreTypeSMT
StoreTypePersistent = v1.StoreTypePersistent
)
var (
NewKVStoreKey = v1.NewKVStoreKey
PrefixEndBytes = v1.PrefixEndBytes
KVStorePrefixIterator = v1.KVStorePrefixIterator
KVStoreReversePrefixIterator = v1.KVStoreReversePrefixIterator
NewStoreKVPairWriteListener = v1.NewStoreKVPairWriteListener
ProofOpSMTCommitment = v1.ProofOpSMTCommitment
ProofOpSimpleMerkleCommitment = v1.ProofOpSimpleMerkleCommitment
CommitmentOpDecoder = v1.CommitmentOpDecoder
ProofOpFromMap = v1.ProofOpFromMap
NewSmtCommitmentOp = v1.NewSmtCommitmentOp
)
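// scanPrefix is an editor's sketch, not part of the original file: it shows
// how the re-exported iterator helpers above are used together. kvs may be
// any KVStore implementation.
func scanPrefix(kvs KVStore, prefix []byte) [][]byte {
	// Iterates the half-open range [prefix, PrefixEndBytes(prefix)).
	iter := KVStorePrefixIterator(kvs, prefix)
	defer iter.Close()
	var keys [][]byte
	for ; iter.Valid(); iter.Next() {
		keys = append(keys, iter.Key())
	}
	return keys
}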
// BasicMultiStore defines a minimal interface for accessing root state.
type BasicMultiStore interface {
// Returns a KVStore which has access only to the namespace of the StoreKey.
// Panics if the key is not found in the schema.
GetKVStore(StoreKey) KVStore
}
// mixin interface for trace and listen methods
type rootStoreTraceListen interface {
TracingEnabled() bool
SetTracer(w io.Writer)
SetTraceContext(TraceContext)
ListeningEnabled(key StoreKey) bool
AddListeners(key StoreKey, listeners []WriteListener)
}
// CommitMultiStore defines a complete interface for persistent root state, including
// (read-only) access to past versions, pruning, trace/listen, and state snapshots.
type CommitMultiStore interface {
BasicMultiStore
rootStoreTraceListen
Committer
snapshottypes.Snapshotter
// Gets a read-only view of the store at a specific version.
// Returns an error if the version is not found.
GetVersion(int64) (BasicMultiStore, error)
// Closes the store and all backing transactions.
Close() error
// Returns a branched store whose modifications are later merged back in.
CacheMultiStore() CacheMultiStore
// Defines the minimum version number that can be saved by this store.
SetInitialVersion(uint64) error
}
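// readAtVersion is an editor's sketch, not part of the original file: it
// illustrates the read-only historical access that GetVersion describes.
// cms, key, and version are assumed inputs.
func readAtVersion(cms CommitMultiStore, key StoreKey, version int64, k []byte) ([]byte, error) {
	view, err := cms.GetVersion(version) // read-only BasicMultiStore at `version`
	if err != nil {
		return nil, err
	}
	return view.GetKVStore(key).Get(k), nil
}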
// CacheMultiStore defines a branch of the root state which can be written back to the source store.
type CacheMultiStore interface {
BasicMultiStore
rootStoreTraceListen
// Returns a branched store whose modifications are later merged back in.
CacheMultiStore() CacheMultiStore
// Write all cached changes back to the source store. Note: this overwrites any intervening changes.
Write()
}
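// applyBranch is an editor's sketch, not part of the original file: the
// branch-then-Write pattern that CacheMultiStore describes. Writes buffer in
// the branch and only reach cms once Write is called.
func applyBranch(cms CommitMultiStore, key StoreKey) {
	branch := cms.CacheMultiStore() // buffered branch of the root state
	kv := branch.GetKVStore(key)    // view scoped to key's namespace
	kv.Set([]byte("k"), []byte("v"))
	branch.Write() // merge buffered writes back into the source store
}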
// MultiStorePersistentCache provides inter-block (persistent) caching capabilities for a CommitMultiStore.
// TODO: placeholder. Implement and redefine this
type MultiStorePersistentCache = v1.MultiStorePersistentCache