store: keys as bytes (#19775)
Co-authored-by: sontrinh16 <trinhleson2000@gmail.com> Co-authored-by: cool-developer <51834436+cool-develope@users.noreply.github.com> Co-authored-by: Aleksandr Bezobchuk <alexanderbez@users.noreply.github.com> Co-authored-by: Matt Kocubinski <mkocubinski@gmail.com>
This commit is contained in:
parent
fa19df111d
commit
3166ebbf91
@ -1,11 +1,23 @@
|
||||
package store
|
||||
|
||||
import (
	"bytes"
	"sort"
)
|
||||
|
||||
// Changeset is a list of changes to be written to disk
|
||||
type Changeset struct {
|
||||
Changes []StateChanges
|
||||
}
|
||||
|
||||
// StateChanges represents a set of changes to the state of an actor in storage.
|
||||
type StateChanges struct {
|
||||
Actor []byte // actor represents the space in storage where state is stored, previously this was called a "storekey"
|
||||
StateChanges []KVPair // StateChanges is a list of key-value pairs representing the changes to the state.
|
||||
Actor []byte // actor represents the space in storage where state is stored, previously this was called a "storekey"
|
||||
StateChanges KVPairs // StateChanges is a list of key-value pairs representing the changes to the state.
|
||||
}
|
||||
|
||||
// KVPairs represents a set of key-value pairs.
|
||||
type KVPairs = []KVPair
|
||||
|
||||
// KVPair represents a change in a key and value of state.
|
||||
// Remove being true signals the key must be removed from state.
|
||||
type KVPair struct {
|
||||
@ -16,3 +28,76 @@ type KVPair struct {
|
||||
// Remove is true when the key must be removed from state.
|
||||
Remove bool
|
||||
}
|
||||
|
||||
func NewChangeset() *Changeset {
|
||||
return &Changeset{}
|
||||
}
|
||||
|
||||
func NewChangesetWithPairs(pairs map[string]KVPairs) *Changeset {
|
||||
changes := make([]StateChanges, len(pairs))
|
||||
i := 0
|
||||
for storeKey, kvPairs := range pairs {
|
||||
changes[i] = StateChanges{
|
||||
Actor: []byte(storeKey),
|
||||
StateChanges: kvPairs,
|
||||
}
|
||||
i++
|
||||
}
|
||||
return &Changeset{
|
||||
Changes: changes,
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the number of key-value pairs in the batch.
|
||||
func (cs *Changeset) Size() int {
|
||||
cnt := 0
|
||||
for _, pairs := range cs.Changes {
|
||||
cnt += len(pairs.StateChanges)
|
||||
}
|
||||
|
||||
return cnt
|
||||
}
|
||||
|
||||
// Add adds a key-value pair to the ChangeSet.
|
||||
func (cs *Changeset) Add(storeKey, key, value []byte, remove bool) {
|
||||
found := false
|
||||
for i, pairs := range cs.Changes {
|
||||
if bytes.Equal(storeKey, pairs.Actor) {
|
||||
pairs.StateChanges = append(pairs.StateChanges, KVPair{
|
||||
Key: key,
|
||||
Value: value,
|
||||
Remove: remove,
|
||||
})
|
||||
cs.Changes[i] = pairs
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
cs.Changes = append(cs.Changes, StateChanges{
|
||||
Actor: storeKey,
|
||||
StateChanges: []KVPair{{Key: key, Value: value, Remove: remove}},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// AddKVPair adds a KVPair to the ChangeSet.
|
||||
func (cs *Changeset) AddKVPair(storeKey []byte, pair KVPair) {
|
||||
found := false
|
||||
for i, pairs := range cs.Changes {
|
||||
if bytes.Equal(storeKey, pairs.Actor) {
|
||||
pairs.StateChanges = append(pairs.StateChanges, pair)
|
||||
cs.Changes[i] = pairs
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
cs.Changes = append(cs.Changes, StateChanges{
|
||||
Actor: storeKey,
|
||||
StateChanges: []KVPair{pair},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,63 +0,0 @@
|
||||
package store
|
||||
|
||||
// KVPair defines a key-value pair with additional metadata that is used to
|
||||
// track writes. Deletion can be denoted by a nil value or explicitly by the
|
||||
// Delete field.
|
||||
type KVPair struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
StoreKey string // Optional for snapshot restore
|
||||
}
|
||||
|
||||
type KVPairs []KVPair
|
||||
|
||||
// Changeset defines a set of KVPair entries by maintaining a map from store key
|
||||
// to a slice of KVPair objects.
|
||||
type Changeset struct {
|
||||
Pairs map[string]KVPairs
|
||||
}
|
||||
|
||||
func NewChangeset() *Changeset {
|
||||
return &Changeset{
|
||||
Pairs: make(map[string]KVPairs),
|
||||
}
|
||||
}
|
||||
|
||||
func NewChangesetWithPairs(pairs map[string]KVPairs) *Changeset {
|
||||
return &Changeset{
|
||||
Pairs: pairs,
|
||||
}
|
||||
}
|
||||
|
||||
// Size returns the number of key-value pairs in the batch.
|
||||
func (cs *Changeset) Size() int {
|
||||
cnt := 0
|
||||
for _, pairs := range cs.Pairs {
|
||||
cnt += len(pairs)
|
||||
}
|
||||
|
||||
return cnt
|
||||
}
|
||||
|
||||
// Add adds a key-value pair to the ChangeSet.
|
||||
func (cs *Changeset) Add(storeKey string, key, value []byte) {
|
||||
cs.Pairs[storeKey] = append(cs.Pairs[storeKey], KVPair{
|
||||
Key: key,
|
||||
Value: value,
|
||||
StoreKey: storeKey,
|
||||
})
|
||||
}
|
||||
|
||||
// AddKVPair adds a KVPair to the ChangeSet.
|
||||
func (cs *Changeset) AddKVPair(storeKey string, pair KVPair) {
|
||||
cs.Pairs[storeKey] = append(cs.Pairs[storeKey], pair)
|
||||
}
|
||||
|
||||
// Merge merges the provided Changeset argument into the receiver. This may be
|
||||
// useful when you have a Changeset that only pertains to a single store key,
|
||||
// i.e. a map of size one, and you want to merge it into another.
|
||||
func (cs *Changeset) Merge(other *Changeset) {
|
||||
for storeKey, pairs := range other.Pairs {
|
||||
cs.Pairs[storeKey] = append(cs.Pairs[storeKey], pairs...)
|
||||
}
|
||||
}
|
||||
@ -9,8 +9,10 @@ import (
|
||||
|
||||
protoio "github.com/cosmos/gogoproto/io"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
internal "cosmossdk.io/store/v2/internal/conv"
|
||||
"cosmossdk.io/store/v2/internal/encoding"
|
||||
"cosmossdk.io/store/v2/proof"
|
||||
"cosmossdk.io/store/v2/snapshots"
|
||||
@ -42,7 +44,7 @@ type CommitStore struct {
|
||||
}
|
||||
|
||||
// NewCommitStore creates a new CommitStore instance.
|
||||
func NewCommitStore(multiTrees map[string]Tree, db store.RawDB, pruneOpts *store.PruneOptions, logger log.Logger) (*CommitStore, error) {
|
||||
func NewCommitStore(trees map[string]Tree, db store.RawDB, pruneOpts *store.PruneOptions, logger log.Logger) (*CommitStore, error) {
|
||||
if pruneOpts == nil {
|
||||
pruneOpts = store.DefaultPruneOptions()
|
||||
}
|
||||
@ -50,19 +52,22 @@ func NewCommitStore(multiTrees map[string]Tree, db store.RawDB, pruneOpts *store
|
||||
return &CommitStore{
|
||||
logger: logger,
|
||||
db: db,
|
||||
multiTrees: multiTrees,
|
||||
multiTrees: trees,
|
||||
pruneOptions: pruneOpts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *CommitStore) WriteBatch(cs *store.Changeset) error {
|
||||
for storeKey, pairs := range cs.Pairs {
|
||||
tree, ok := c.multiTrees[storeKey]
|
||||
func (c *CommitStore) WriteBatch(cs *corestore.Changeset) error {
|
||||
for _, pairs := range cs.Changes {
|
||||
|
||||
key := internal.UnsafeBytesToStr(pairs.Actor)
|
||||
|
||||
tree, ok := c.multiTrees[key]
|
||||
if !ok {
|
||||
return fmt.Errorf("store key %s not found in multiTrees", storeKey)
|
||||
return fmt.Errorf("store key %s not found in multiTrees", key)
|
||||
}
|
||||
for _, kv := range pairs {
|
||||
if kv.Value == nil {
|
||||
for _, kv := range pairs.StateChanges {
|
||||
if kv.Remove {
|
||||
if err := tree.Remove(kv.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -78,8 +83,9 @@ func (c *CommitStore) WriteBatch(cs *store.Changeset) error {
|
||||
func (c *CommitStore) WorkingCommitInfo(version uint64) *proof.CommitInfo {
|
||||
storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees))
|
||||
for storeKey, tree := range c.multiTrees {
|
||||
bz := []byte(storeKey)
|
||||
storeInfos = append(storeInfos, proof.StoreInfo{
|
||||
Name: storeKey,
|
||||
Name: bz,
|
||||
CommitID: proof.CommitID{
|
||||
Version: version,
|
||||
Hash: tree.WorkingHash(),
|
||||
@ -210,7 +216,7 @@ func (c *CommitStore) Commit(version uint64) (*proof.CommitInfo, error) {
|
||||
}
|
||||
}
|
||||
storeInfos = append(storeInfos, proof.StoreInfo{
|
||||
Name: storeKey,
|
||||
Name: []byte(storeKey),
|
||||
CommitID: commitID,
|
||||
})
|
||||
}
|
||||
@ -244,8 +250,8 @@ func (c *CommitStore) SetInitialVersion(version uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CommitStore) GetProof(storeKey string, version uint64, key []byte) ([]proof.CommitmentOp, error) {
|
||||
tree, ok := c.multiTrees[storeKey]
|
||||
func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) {
|
||||
tree, ok := c.multiTrees[internal.UnsafeBytesToStr(storeKey)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("store %s not found", storeKey)
|
||||
}
|
||||
@ -270,8 +276,8 @@ func (c *CommitStore) GetProof(storeKey string, version uint64, key []byte) ([]p
|
||||
return []proof.CommitmentOp{commitOp, *storeCommitmentOp}, nil
|
||||
}
|
||||
|
||||
func (c *CommitStore) Get(storeKey string, version uint64, key []byte) ([]byte, error) {
|
||||
tree, ok := c.multiTrees[storeKey]
|
||||
func (c *CommitStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
|
||||
tree, ok := c.multiTrees[internal.UnsafeBytesToStr(storeKey)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("store %s not found", storeKey)
|
||||
}
|
||||
@ -370,7 +376,7 @@ func (c *CommitStore) Snapshot(version uint64, protoWriter protoio.Writer) error
|
||||
}
|
||||
|
||||
// Restore implements snapshotstypes.CommitSnapshotter.
|
||||
func (c *CommitStore) Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *store.KVPair) (snapshotstypes.SnapshotItem, error) {
|
||||
func (c *CommitStore) Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (snapshotstypes.SnapshotItem, error) {
|
||||
var (
|
||||
importer Importer
|
||||
snapshotItem snapshotstypes.SnapshotItem
|
||||
@ -395,8 +401,9 @@ loop:
|
||||
}
|
||||
importer.Close()
|
||||
}
|
||||
|
||||
storeKey = item.Store.Name
|
||||
tree := c.multiTrees[storeKey]
|
||||
tree := c.multiTrees[item.Store.Name]
|
||||
if tree == nil {
|
||||
return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", storeKey)
|
||||
}
|
||||
@ -424,11 +431,18 @@ loop:
|
||||
if node.Value == nil {
|
||||
node.Value = []byte{}
|
||||
}
|
||||
|
||||
key := []byte(storeKey)
|
||||
// If the node is a leaf node, it will be written to the storage.
|
||||
chStorage <- &store.KVPair{
|
||||
Key: node.Key,
|
||||
Value: node.Value,
|
||||
StoreKey: storeKey,
|
||||
chStorage <- &corestore.StateChanges{
|
||||
Actor: key,
|
||||
StateChanges: []corestore.KVPair{
|
||||
{
|
||||
Key: node.Key,
|
||||
Value: node.Value,
|
||||
Remove: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
err := importer.Add(node)
|
||||
|
||||
@ -1,12 +1,14 @@
|
||||
package commitment
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
dbm "cosmossdk.io/store/v2/db"
|
||||
@ -34,16 +36,16 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() {
|
||||
latestVersion := uint64(10)
|
||||
kvCount := 10
|
||||
for i := uint64(1); i <= latestVersion; i++ {
|
||||
kvPairs := make(map[string]store.KVPairs)
|
||||
kvPairs := make(map[string]corestore.KVPairs)
|
||||
for _, storeKey := range storeKeys {
|
||||
kvPairs[storeKey] = store.KVPairs{}
|
||||
kvPairs[storeKey] = corestore.KVPairs{}
|
||||
for j := 0; j < kvCount; j++ {
|
||||
key := []byte(fmt.Sprintf("key-%d-%d", i, j))
|
||||
value := []byte(fmt.Sprintf("value-%d-%d", i, j))
|
||||
kvPairs[storeKey] = append(kvPairs[storeKey], store.KVPair{Key: key, Value: value})
|
||||
kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
|
||||
}
|
||||
}
|
||||
s.Require().NoError(commitStore.WriteBatch(store.NewChangesetWithPairs(kvPairs)))
|
||||
s.Require().NoError(commitStore.WriteBatch(corestore.NewChangesetWithPairs(kvPairs)))
|
||||
|
||||
_, err = commitStore.Commit(i)
|
||||
s.Require().NoError(err)
|
||||
@ -79,13 +81,15 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() {
|
||||
|
||||
streamReader, err := snapshots.NewStreamReader(chunks)
|
||||
s.Require().NoError(err)
|
||||
chStorage := make(chan *store.KVPair, 100)
|
||||
chStorage := make(chan *corestore.StateChanges, 100)
|
||||
leaves := make(map[string]string)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
for kv := range chStorage {
|
||||
leaves[fmt.Sprintf("%s_%s", kv.StoreKey, kv.Key)] = string(kv.Value)
|
||||
for _, actor := range kv.StateChanges {
|
||||
leaves[fmt.Sprintf("%s_%s", kv.Actor, actor.Key)] = string(actor.Value)
|
||||
}
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
@ -110,7 +114,7 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() {
|
||||
for _, storeInfo := range targetCommitInfo.StoreInfos {
|
||||
matched := false
|
||||
for _, latestStoreInfo := range cInfo.StoreInfos {
|
||||
if storeInfo.Name == latestStoreInfo.Name {
|
||||
if bytes.Equal(storeInfo.Name, latestStoreInfo.Name) {
|
||||
s.Require().Equal(latestStoreInfo.GetHash(), storeInfo.GetHash())
|
||||
matched = true
|
||||
}
|
||||
@ -131,16 +135,16 @@ func (s *CommitStoreTestSuite) TestStore_Pruning() {
|
||||
latestVersion := uint64(100)
|
||||
kvCount := 10
|
||||
for i := uint64(1); i <= latestVersion; i++ {
|
||||
kvPairs := make(map[string]store.KVPairs)
|
||||
kvPairs := make(map[string]corestore.KVPairs)
|
||||
for _, storeKey := range storeKeys {
|
||||
kvPairs[storeKey] = store.KVPairs{}
|
||||
kvPairs[storeKey] = corestore.KVPairs{}
|
||||
for j := 0; j < kvCount; j++ {
|
||||
key := []byte(fmt.Sprintf("key-%d-%d", i, j))
|
||||
value := []byte(fmt.Sprintf("value-%d-%d", i, j))
|
||||
kvPairs[storeKey] = append(kvPairs[storeKey], store.KVPair{Key: key, Value: value})
|
||||
kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
|
||||
}
|
||||
}
|
||||
s.Require().NoError(commitStore.WriteBatch(store.NewChangesetWithPairs(kvPairs)))
|
||||
s.Require().NoError(commitStore.WriteBatch(corestore.NewChangesetWithPairs(kvPairs)))
|
||||
|
||||
_, err = commitStore.Commit(i)
|
||||
s.Require().NoError(err)
|
||||
|
||||
@ -12,13 +12,13 @@ type Reader interface {
|
||||
// Has retrieves if a key is present in the key-value data store.
|
||||
//
|
||||
// Note: <key> is safe to modify and read after calling Has.
|
||||
Has(storeKey string, key []byte) (bool, error)
|
||||
Has(storeKey, key []byte) (bool, error)
|
||||
|
||||
// Get retrieves the given key if it's present in the key-value data store.
|
||||
//
|
||||
// Note: <key> is safe to modify and read after calling Get.
|
||||
// The returned byte slice is safe to read, but cannot be modified.
|
||||
Get(storeKey string, key []byte) ([]byte, error)
|
||||
Get(storeKey []byte, key []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// Writer wraps the Set method of a backing data store.
|
||||
@ -26,12 +26,12 @@ type Writer interface {
|
||||
// Set inserts the given value into the key-value data store.
|
||||
//
|
||||
// Note: <key, value> are safe to modify and read after calling Set.
|
||||
Set(storeKey string, key, value []byte) error
|
||||
Set(storeKey []byte, key, value []byte) error
|
||||
|
||||
// Delete removes the key from the backing key-value data store.
|
||||
//
|
||||
// Note: <key> is safe to modify and read after calling Delete.
|
||||
Delete(storeKey string, key []byte) error
|
||||
Delete(storeKey []byte, key []byte) error
|
||||
}
|
||||
|
||||
// Database contains all the methods required to allow handling different
|
||||
@ -46,15 +46,15 @@ type Database interface {
|
||||
// VersionedDatabase defines an API for a versioned database that allows reads,
|
||||
// writes, iteration and commitment over a series of versions.
|
||||
type VersionedDatabase interface {
|
||||
Has(storeKey string, version uint64, key []byte) (bool, error)
|
||||
Get(storeKey string, version uint64, key []byte) ([]byte, error)
|
||||
Has(storeKey []byte, version uint64, key []byte) (bool, error)
|
||||
Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
|
||||
GetLatestVersion() (uint64, error)
|
||||
SetLatestVersion(version uint64) error
|
||||
|
||||
Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
|
||||
ApplyChangeset(version uint64, cs *Changeset) error
|
||||
ApplyChangeset(version uint64, cs *corestore.Changeset) error
|
||||
|
||||
// Prune attempts to prune all versions up to and including the provided
|
||||
// version argument. The operation should be idempotent. An error should be
|
||||
@ -69,7 +69,7 @@ type VersionedDatabase interface {
|
||||
// Committer defines an API for committing state.
|
||||
type Committer interface {
|
||||
// WriteBatch writes a batch of key-value pairs to the tree.
|
||||
WriteBatch(cs *Changeset) error
|
||||
WriteBatch(cs *corestore.Changeset) error
|
||||
|
||||
// WorkingCommitInfo returns the CommitInfo for the working tree.
|
||||
WorkingCommitInfo(version uint64) *proof.CommitInfo
|
||||
@ -84,13 +84,13 @@ type Committer interface {
|
||||
Commit(version uint64) (*proof.CommitInfo, error)
|
||||
|
||||
// GetProof returns the proof of existence or non-existence for the given key.
|
||||
GetProof(storeKey string, version uint64, key []byte) ([]proof.CommitmentOp, error)
|
||||
GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error)
|
||||
|
||||
// Get returns the value for the given key at the given version.
|
||||
//
|
||||
// NOTE: This method only exists to support migration from IAVL v0/v1 to v2.
|
||||
// Once migration is complete, this method should be removed and/or not used.
|
||||
Get(storeKey string, version uint64, key []byte) ([]byte, error)
|
||||
Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
|
||||
|
||||
// SetInitialVersion sets the initial version of the tree.
|
||||
SetInitialVersion(version uint64) error
|
||||
|
||||
2
store/internal/conv/doc.go
Normal file
2
store/internal/conv/doc.go
Normal file
@ -0,0 +1,2 @@
|
||||
// Package conv provides internal functions for conversions and data manipulation
|
||||
package conv
|
||||
19
store/internal/conv/string.go
Normal file
19
store/internal/conv/string.go
Normal file
@ -0,0 +1,19 @@
|
||||
package conv
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// UnsafeStrToBytes converts a string into a byte slice without copying, using
// unsafe. The returned bytes alias the string's backing memory and must never
// be altered after this function is called, as doing so will cause a
// segmentation fault (strings are immutable).
func UnsafeStrToBytes(s string) []byte {
	// ref https://github.com/golang/go/issues/53003#issuecomment-1140276077
	return unsafe.Slice(unsafe.StringData(s), len(s))
}
|
||||
|
||||
// UnsafeBytesToStr is meant to make a zero allocation conversion
// from []byte -> string to speed up operations; it is not meant
// to be used generally, but for a specific pattern to delete keys
// from a map. The input slice must not be modified after the call,
// since the returned string aliases its backing array.
func UnsafeBytesToStr(b []byte) string {
	// unsafe.String (Go 1.20+) is the supported replacement for casting the
	// slice header via unsafe.Pointer, and mirrors the unsafe.StringData
	// usage in UnsafeStrToBytes.
	return unsafe.String(unsafe.SliceData(b), len(b))
}
|
||||
54
store/internal/conv/string_test.go
Normal file
54
store/internal/conv/string_test.go
Normal file
@ -0,0 +1,54 @@
|
||||
package conv
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
func TestStringSuite(t *testing.T) {
|
||||
suite.Run(t, new(StringSuite))
|
||||
}
|
||||
|
||||
type StringSuite struct{ suite.Suite }
|
||||
|
||||
func unsafeConvertStr() []byte {
|
||||
return UnsafeStrToBytes("abc")
|
||||
}
|
||||
|
||||
func (s *StringSuite) TestUnsafeStrToBytes() {
|
||||
// we convert in other function to trigger GC. We want to check that
|
||||
// the underlying array in []bytes is accessible after GC will finish swapping.
|
||||
for i := 0; i < 5; i++ {
|
||||
b := unsafeConvertStr()
|
||||
runtime.GC()
|
||||
<-time.NewTimer(2 * time.Millisecond).C
|
||||
b2 := append(b, 'd')
|
||||
s.Equal("abc", string(b))
|
||||
s.Equal("abcd", string(b2))
|
||||
}
|
||||
}
|
||||
|
||||
func unsafeConvertBytes() string {
|
||||
return UnsafeBytesToStr([]byte("abc"))
|
||||
}
|
||||
|
||||
func (s *StringSuite) TestUnsafeBytesToStr() {
|
||||
// we convert in other function to trigger GC. We want to check that
|
||||
// the underlying array in []bytes is accessible after GC will finish swapping.
|
||||
for i := 0; i < 5; i++ {
|
||||
str := unsafeConvertBytes()
|
||||
runtime.GC()
|
||||
<-time.NewTimer(2 * time.Millisecond).C
|
||||
s.Equal("abc", str)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkUnsafeStrToBytes(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
UnsafeStrToBytes(strconv.Itoa(i))
|
||||
}
|
||||
}
|
||||
@ -3,8 +3,8 @@ package migration
|
||||
import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
"cosmossdk.io/store/v2/snapshots"
|
||||
)
|
||||
|
||||
@ -45,7 +45,7 @@ func (m *Manager) Migrate(height uint64) error {
|
||||
}
|
||||
|
||||
// restore the snapshot
|
||||
chStorage := make(chan *store.KVPair, defaultStorageBufferSize)
|
||||
chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize)
|
||||
|
||||
eg := new(errgroup.Group)
|
||||
eg.Go(func() error {
|
||||
|
||||
@ -6,8 +6,8 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
"cosmossdk.io/store/v2/commitment"
|
||||
"cosmossdk.io/store/v2/commitment/iavl"
|
||||
dbm "cosmossdk.io/store/v2/db"
|
||||
@ -60,10 +60,10 @@ func TestMigrateState(t *testing.T) {
|
||||
toVersion := uint64(100)
|
||||
keyCount := 10
|
||||
for version := uint64(1); version <= toVersion; version++ {
|
||||
cs := store.NewChangeset()
|
||||
cs := corestore.NewChangeset()
|
||||
for _, storeKey := range storeKeys {
|
||||
for i := 0; i < keyCount; i++ {
|
||||
cs.Add(storeKey, []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)))
|
||||
cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
|
||||
}
|
||||
}
|
||||
require.NoError(t, orgCommitStore.WriteBatch(cs))
|
||||
@ -78,17 +78,17 @@ func TestMigrateState(t *testing.T) {
|
||||
for version := uint64(1); version < toVersion; version++ {
|
||||
for _, storeKey := range storeKeys {
|
||||
for i := 0; i < keyCount; i++ {
|
||||
val, err := m.commitSnapshotter.(*commitment.CommitStore).Get(storeKey, toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
|
||||
val, err := m.commitSnapshotter.(*commitment.CommitStore).Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
|
||||
}
|
||||
}
|
||||
}
|
||||
// check the latest state
|
||||
val, err := m.commitSnapshotter.(*commitment.CommitStore).Get("store1", toVersion-1, []byte("key-100-1"))
|
||||
val, err := m.commitSnapshotter.(*commitment.CommitStore).Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, val)
|
||||
val, err = m.commitSnapshotter.(*commitment.CommitStore).Get("store2", toVersion-1, []byte("key-100-0"))
|
||||
val, err = m.commitSnapshotter.(*commitment.CommitStore).Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, val)
|
||||
|
||||
@ -96,7 +96,7 @@ func TestMigrateState(t *testing.T) {
|
||||
for version := uint64(1); version < toVersion; version++ {
|
||||
for _, storeKey := range storeKeys {
|
||||
for i := 0; i < keyCount; i++ {
|
||||
val, err := m.storageSnapshotter.(*storage.StorageStore).Get(storeKey, toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
|
||||
val, err := m.storageSnapshotter.(*storage.StorageStore).Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
|
||||
}
|
||||
|
||||
@ -22,7 +22,7 @@ type (
|
||||
// StoreInfo defines store-specific commit information. It contains a reference
|
||||
// between a store name/key and the commit ID.
|
||||
StoreInfo struct {
|
||||
Name string
|
||||
Name []byte
|
||||
CommitID CommitID
|
||||
}
|
||||
|
||||
@ -49,14 +49,14 @@ func (ci *CommitInfo) Hash() []byte {
|
||||
return ci.CommitHash
|
||||
}
|
||||
|
||||
rootHash, _, _ := ci.GetStoreProof("")
|
||||
rootHash, _, _ := ci.GetStoreProof([]byte{})
|
||||
return rootHash
|
||||
}
|
||||
|
||||
// GetStoreCommitID returns the CommitID for the given store key.
|
||||
func (ci *CommitInfo) GetStoreCommitID(storeKey string) CommitID {
|
||||
func (ci *CommitInfo) GetStoreCommitID(storeKey []byte) CommitID {
|
||||
for _, si := range ci.StoreInfos {
|
||||
if si.Name == storeKey {
|
||||
if bytes.Equal(si.Name, storeKey) {
|
||||
return si.CommitID
|
||||
}
|
||||
}
|
||||
@ -66,9 +66,9 @@ func (ci *CommitInfo) GetStoreCommitID(storeKey string) CommitID {
|
||||
// GetStoreProof takes in a storeKey and returns a proof of the store key in addition
|
||||
// to the root hash it should be proved against. If an empty string is provided, the first
|
||||
// store based on lexicographical ordering will be proved.
|
||||
func (ci *CommitInfo) GetStoreProof(storeKey string) ([]byte, *CommitmentOp, error) {
|
||||
func (ci *CommitInfo) GetStoreProof(storeKey []byte) ([]byte, *CommitmentOp, error) {
|
||||
sort.Slice(ci.StoreInfos, func(i, j int) bool {
|
||||
return ci.StoreInfos[i].Name < ci.StoreInfos[j].Name
|
||||
return bytes.Compare(ci.StoreInfos[i].Name, ci.StoreInfos[j].Name) < 0
|
||||
})
|
||||
|
||||
index := 0
|
||||
@ -79,7 +79,7 @@ func (ci *CommitInfo) GetStoreProof(storeKey string) ([]byte, *CommitmentOp, err
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if si.Name == storeKey {
|
||||
if bytes.Equal(si.Name, storeKey) {
|
||||
index = i
|
||||
}
|
||||
}
|
||||
@ -165,7 +165,7 @@ func (ci *CommitInfo) Unmarshal(buf []byte) error {
|
||||
return err
|
||||
}
|
||||
buf = buf[n:]
|
||||
ci.StoreInfos[i].Name = string(name)
|
||||
ci.StoreInfos[i].Name = name
|
||||
// CommitID
|
||||
hash, n, err := encoding.DecodeBytes(buf)
|
||||
if err != nil {
|
||||
|
||||
@ -12,27 +12,27 @@ func TestGetStoreProof(t *testing.T) {
|
||||
storeInfos []StoreInfo
|
||||
}{
|
||||
{[]StoreInfo{
|
||||
{"key1", CommitID{1, []byte("value1")}},
|
||||
{[]byte("key1"), CommitID{1, []byte("value1")}},
|
||||
}},
|
||||
{[]StoreInfo{
|
||||
{"key2", CommitID{1, []byte("value2")}},
|
||||
{"key1", CommitID{1, []byte("value1")}},
|
||||
{[]byte("key2"), CommitID{1, []byte("value2")}},
|
||||
{[]byte("key1"), CommitID{1, []byte("value1")}},
|
||||
}},
|
||||
{[]StoreInfo{
|
||||
{"key3", CommitID{1, []byte("value3")}},
|
||||
{"key2", CommitID{1, []byte("value2")}},
|
||||
{"key1", CommitID{1, []byte("value1")}},
|
||||
{[]byte("key3"), CommitID{1, []byte("value3")}},
|
||||
{[]byte("key2"), CommitID{1, []byte("value2")}},
|
||||
{[]byte("key1"), CommitID{1, []byte("value1")}},
|
||||
}},
|
||||
{[]StoreInfo{
|
||||
{"key2", CommitID{1, []byte("value2")}},
|
||||
{"key1", CommitID{1, []byte("value1")}},
|
||||
{"key3", CommitID{1, []byte("value3")}},
|
||||
{[]byte("key2"), CommitID{1, []byte("value2")}},
|
||||
{[]byte("key1"), CommitID{1, []byte("value1")}},
|
||||
{[]byte("key3"), CommitID{1, []byte("value3")}},
|
||||
}},
|
||||
{[]StoreInfo{
|
||||
{"key4", CommitID{1, []byte("value4")}},
|
||||
{"key1", CommitID{1, []byte("value1")}},
|
||||
{"key3", CommitID{1, []byte("value3")}},
|
||||
{"key2", CommitID{1, []byte("value2")}},
|
||||
{[]byte("key4"), CommitID{1, []byte("value4")}},
|
||||
{[]byte("key1"), CommitID{1, []byte("value1")}},
|
||||
{[]byte("key3"), CommitID{1, []byte("value3")}},
|
||||
{[]byte("key2"), CommitID{1, []byte("value2")}},
|
||||
}},
|
||||
}
|
||||
|
||||
@ -45,7 +45,7 @@ func TestGetStoreProof(t *testing.T) {
|
||||
}
|
||||
commitHash := ci.Hash()
|
||||
// make sure the store infos are sorted
|
||||
require.Equal(t, ci.StoreInfos[0].Name, "key1")
|
||||
require.Equal(t, ci.StoreInfos[0].Name, []byte("key1"))
|
||||
for _, si := range tc.storeInfos {
|
||||
// get the proof
|
||||
_, proof, err := ci.GetStoreProof(si.Name)
|
||||
|
||||
@ -45,7 +45,7 @@ func NewReader(v uint64, rs store.RootStore, actor []byte) *Reader {
|
||||
}
|
||||
|
||||
func (roa *Reader) Has(key []byte) (bool, error) {
|
||||
val, err := roa.rootStore.GetStateStorage().Has(string(roa.actor), roa.version, key) // TODO: move storekeys to []byte
|
||||
val, err := roa.rootStore.GetStateStorage().Has(roa.actor, roa.version, key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -54,7 +54,7 @@ func (roa *Reader) Has(key []byte) (bool, error) {
|
||||
}
|
||||
|
||||
func (roa *Reader) Get(key []byte) ([]byte, error) {
|
||||
result, err := roa.rootStore.GetStateStorage().Get(string(roa.actor), roa.version, key) // TODO: move storekeys to []byte
|
||||
result, err := roa.rootStore.GetStateStorage().Get(roa.actor, roa.version, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -63,9 +63,9 @@ func (roa *Reader) Get(key []byte) ([]byte, error) {
|
||||
}
|
||||
|
||||
func (roa *Reader) Iterator(start, end []byte) (corestore.Iterator, error) {
|
||||
return roa.rootStore.GetStateStorage().Iterator(string(roa.actor), roa.version, start, end)
|
||||
return roa.rootStore.GetStateStorage().Iterator(roa.actor, roa.version, start, end)
|
||||
}
|
||||
|
||||
func (roa *Reader) ReverseIterator(start, end []byte) (corestore.Iterator, error) {
|
||||
return roa.rootStore.GetStateStorage().ReverseIterator(string(roa.actor), roa.version, start, end)
|
||||
return roa.rootStore.GetStateStorage().ReverseIterator(roa.actor, roa.version, start, end)
|
||||
}
|
||||
|
||||
@ -157,7 +157,7 @@ func (s *Store) GetLatestVersion() (uint64, error) {
|
||||
return lastCommitID.Version, nil
|
||||
}
|
||||
|
||||
func (s *Store) Query(storeKey string, version uint64, key []byte, prove bool) (store.QueryResult, error) {
|
||||
func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) (store.QueryResult, error) {
|
||||
if s.telemetry != nil {
|
||||
now := time.Now()
|
||||
defer s.telemetry.MeasureSince(now, "root_store", "query")
|
||||
@ -249,7 +249,7 @@ func (s *Store) SetCommitHeader(h *coreheader.Info) {
|
||||
// If working hash is nil, then we need to compute and set it on the root store
|
||||
// by constructing a CommitInfo object, which in turn creates and writes a batch
|
||||
// of the current changeset to the SC tree.
|
||||
func (s *Store) WorkingHash(cs *store.Changeset) ([]byte, error) {
|
||||
func (s *Store) WorkingHash(cs *corestore.Changeset) ([]byte, error) {
|
||||
if s.telemetry != nil {
|
||||
now := time.Now()
|
||||
defer s.telemetry.MeasureSince(now, "root_store", "working_hash")
|
||||
@ -271,7 +271,7 @@ func (s *Store) WorkingHash(cs *store.Changeset) ([]byte, error) {
|
||||
// with the same Changeset, which internally sets the working hash, retrieved by
|
||||
// writing a batch of the changeset to the SC tree, and CommitInfo on the root
|
||||
// store.
|
||||
func (s *Store) Commit(cs *store.Changeset) ([]byte, error) {
|
||||
func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) {
|
||||
if s.telemetry != nil {
|
||||
now := time.Now()
|
||||
defer s.telemetry.MeasureSince(now, "root_store", "commit")
|
||||
@ -342,7 +342,7 @@ func (s *Store) Prune(version uint64) error {
|
||||
// tree, which allows us to retrieve the working hash of the SC tree. Finally,
|
||||
// we construct a *CommitInfo and set that as lastCommitInfo. Note, this should
|
||||
// only be called once per block!
|
||||
func (s *Store) writeSC(cs *store.Changeset) error {
|
||||
func (s *Store) writeSC(cs *corestore.Changeset) error {
|
||||
if err := s.stateCommitment.WriteBatch(cs); err != nil {
|
||||
return fmt.Errorf("failed to write batch to SC store: %w", err)
|
||||
}
|
||||
@ -372,7 +372,7 @@ func (s *Store) writeSC(cs *store.Changeset) error {
|
||||
// should have already been written to the SC via WorkingHash(). This method
|
||||
// solely commits that batch. An error is returned if commit fails or if the
|
||||
// resulting commit hash is not equivalent to the working hash.
|
||||
func (s *Store) commitSC(cs *store.Changeset) error {
|
||||
func (s *Store) commitSC(cs *corestore.Changeset) error {
|
||||
cInfo, err := s.stateCommitment.Commit(s.lastCommitInfo.Version)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit SC store: %w", err)
|
||||
|
||||
@ -7,6 +7,7 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
coreheader "cosmossdk.io/core/header"
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
"cosmossdk.io/store/v2/commitment"
|
||||
@ -22,6 +23,12 @@ const (
|
||||
testStoreKey3 = "test_store_key3"
|
||||
)
|
||||
|
||||
var (
|
||||
testStoreKeyBytes = []byte(testStoreKey)
|
||||
testStoreKey2Bytes = []byte(testStoreKey2)
|
||||
testStoreKey3Bytes = []byte(testStoreKey3)
|
||||
)
|
||||
|
||||
type RootStoreTestSuite struct {
|
||||
suite.Suite
|
||||
|
||||
@ -80,12 +87,12 @@ func (s *RootStoreTestSuite) TestSetCommitHeader() {
|
||||
}
|
||||
|
||||
func (s *RootStoreTestSuite) TestQuery() {
|
||||
_, err := s.rootStore.Query("", 1, []byte("foo"), true)
|
||||
_, err := s.rootStore.Query([]byte{}, 1, []byte("foo"), true)
|
||||
s.Require().Error(err)
|
||||
|
||||
// write and commit a changeset
|
||||
cs := store.NewChangeset()
|
||||
cs.Add(testStoreKey, []byte("foo"), []byte("bar"))
|
||||
cs := corestore.NewChangeset()
|
||||
cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false)
|
||||
|
||||
workingHash, err := s.rootStore.WorkingHash(cs)
|
||||
s.Require().NoError(err)
|
||||
@ -97,7 +104,7 @@ func (s *RootStoreTestSuite) TestQuery() {
|
||||
s.Require().Equal(workingHash, commitHash)
|
||||
|
||||
// ensure the proof is non-nil for the corresponding version
|
||||
result, err := s.rootStore.Query(testStoreKey, 1, []byte("foo"), true)
|
||||
result, err := s.rootStore.Query([]byte(testStoreKey), 1, []byte("foo"), true)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(result.ProofOps)
|
||||
s.Require().Equal([]byte("foo"), result.ProofOps[0].Key)
|
||||
@ -107,8 +114,8 @@ func (s *RootStoreTestSuite) TestGetFallback() {
|
||||
sc := s.rootStore.GetStateCommitment()
|
||||
|
||||
// create a changeset and commit it to SC ONLY
|
||||
cs := store.NewChangeset()
|
||||
cs.Add(testStoreKey, []byte("foo"), []byte("bar"))
|
||||
cs := corestore.NewChangeset()
|
||||
cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false)
|
||||
|
||||
err := sc.WriteBatch(cs)
|
||||
s.Require().NoError(err)
|
||||
@ -118,25 +125,25 @@ func (s *RootStoreTestSuite) TestGetFallback() {
|
||||
s.Require().NoError(err)
|
||||
|
||||
// ensure we can query for the key, which should fallback to SC
|
||||
qResult, err := s.rootStore.Query(testStoreKey, 1, []byte("foo"), false)
|
||||
qResult, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("foo"), false)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("bar"), qResult.Value)
|
||||
|
||||
// non-existent key
|
||||
qResult, err = s.rootStore.Query(testStoreKey, 1, []byte("non_existent_key"), false)
|
||||
qResult, err = s.rootStore.Query(testStoreKeyBytes, 1, []byte("non_existent_key"), false)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Nil(qResult.Value)
|
||||
}
|
||||
|
||||
func (s *RootStoreTestSuite) TestQueryProof() {
|
||||
cs := store.NewChangeset()
|
||||
cs := corestore.NewChangeset()
|
||||
// testStoreKey
|
||||
cs.Add(testStoreKey, []byte("key1"), []byte("value1"))
|
||||
cs.Add(testStoreKey, []byte("key2"), []byte("value2"))
|
||||
cs.Add(testStoreKeyBytes, []byte("key1"), []byte("value1"), false)
|
||||
cs.Add(testStoreKeyBytes, []byte("key2"), []byte("value2"), false)
|
||||
// testStoreKey2
|
||||
cs.Add(testStoreKey2, []byte("key3"), []byte("value3"))
|
||||
cs.Add(testStoreKey2Bytes, []byte("key3"), []byte("value3"), false)
|
||||
// testStoreKey3
|
||||
cs.Add(testStoreKey3, []byte("key4"), []byte("value4"))
|
||||
cs.Add(testStoreKey3Bytes, []byte("key4"), []byte("value4"), false)
|
||||
|
||||
// commit
|
||||
_, err := s.rootStore.WorkingHash(cs)
|
||||
@ -145,12 +152,12 @@ func (s *RootStoreTestSuite) TestQueryProof() {
|
||||
s.Require().NoError(err)
|
||||
|
||||
// query proof for testStoreKey
|
||||
result, err := s.rootStore.Query(testStoreKey, 1, []byte("key1"), true)
|
||||
result, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("key1"), true)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(result.ProofOps)
|
||||
cInfo, err := s.rootStore.GetStateCommitment().GetCommitInfo(1)
|
||||
s.Require().NoError(err)
|
||||
storeHash := cInfo.GetStoreCommitID(testStoreKey).Hash
|
||||
storeHash := cInfo.GetStoreCommitID(testStoreKeyBytes).Hash
|
||||
treeRoots, err := result.ProofOps[0].Run([][]byte{[]byte("value1")})
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(treeRoots[0], storeHash)
|
||||
@ -164,8 +171,8 @@ func (s *RootStoreTestSuite) TestLoadVersion() {
|
||||
for v := 1; v <= 5; v++ {
|
||||
val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005
|
||||
|
||||
cs := store.NewChangeset()
|
||||
cs.Add(testStoreKey, []byte("key"), []byte(val))
|
||||
cs := corestore.NewChangeset()
|
||||
cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false)
|
||||
|
||||
workingHash, err := s.rootStore.WorkingHash(cs)
|
||||
s.Require().NoError(err)
|
||||
@ -199,7 +206,7 @@ func (s *RootStoreTestSuite) TestLoadVersion() {
|
||||
_, ro, err := s.rootStore.StateLatest()
|
||||
s.Require().NoError(err)
|
||||
|
||||
reader, err := ro.GetReader([]byte(testStoreKey))
|
||||
reader, err := ro.GetReader(testStoreKeyBytes)
|
||||
s.Require().NoError(err)
|
||||
val, err := reader.Get([]byte("key"))
|
||||
s.Require().NoError(err)
|
||||
@ -209,8 +216,8 @@ func (s *RootStoreTestSuite) TestLoadVersion() {
|
||||
for v := 4; v <= 5; v++ {
|
||||
val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005
|
||||
|
||||
cs := store.NewChangeset()
|
||||
cs.Add(testStoreKey, []byte("key"), []byte(val))
|
||||
cs := corestore.NewChangeset()
|
||||
cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false)
|
||||
|
||||
workingHash, err := s.rootStore.WorkingHash(cs)
|
||||
s.Require().NoError(err)
|
||||
@ -231,7 +238,7 @@ func (s *RootStoreTestSuite) TestLoadVersion() {
|
||||
_, ro, err = s.rootStore.StateLatest()
|
||||
s.Require().NoError(err)
|
||||
|
||||
reader, err = ro.GetReader([]byte(testStoreKey))
|
||||
reader, err = ro.GetReader(testStoreKeyBytes)
|
||||
s.Require().NoError(err)
|
||||
val, err = reader.Get([]byte("key"))
|
||||
s.Require().NoError(err)
|
||||
@ -244,12 +251,12 @@ func (s *RootStoreTestSuite) TestCommit() {
|
||||
s.Require().Zero(lv)
|
||||
|
||||
// perform changes
|
||||
cs := store.NewChangeset()
|
||||
cs := corestore.NewChangeset()
|
||||
for i := 0; i < 100; i++ {
|
||||
key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
|
||||
val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099
|
||||
|
||||
cs.Add(testStoreKey, []byte(key), []byte(val))
|
||||
cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false)
|
||||
}
|
||||
|
||||
// committing w/o calling WorkingHash should error
|
||||
@ -277,7 +284,7 @@ func (s *RootStoreTestSuite) TestCommit() {
|
||||
key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
|
||||
val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099
|
||||
|
||||
reader, err := ro.GetReader([]byte(testStoreKey))
|
||||
reader, err := ro.GetReader(testStoreKeyBytes)
|
||||
s.Require().NoError(err)
|
||||
result, err := reader.Get([]byte(key))
|
||||
s.Require().NoError(err)
|
||||
@ -290,12 +297,12 @@ func (s *RootStoreTestSuite) TestStateAt() {
|
||||
// write keys over multiple versions
|
||||
for v := uint64(1); v <= 5; v++ {
|
||||
// perform changes
|
||||
cs := store.NewChangeset()
|
||||
cs := corestore.NewChangeset()
|
||||
for i := 0; i < 100; i++ {
|
||||
key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
|
||||
val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1
|
||||
|
||||
cs.Add(testStoreKey, []byte(key), []byte(val))
|
||||
cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false)
|
||||
}
|
||||
|
||||
// execute WorkingHash and Commit
|
||||
@ -320,7 +327,7 @@ func (s *RootStoreTestSuite) TestStateAt() {
|
||||
key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
|
||||
val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1
|
||||
|
||||
reader, err := ro.GetReader([]byte(testStoreKey))
|
||||
reader, err := ro.GetReader(testStoreKeyBytes)
|
||||
s.Require().NoError(err)
|
||||
result, err := reader.Get([]byte(key))
|
||||
s.Require().NoError(err)
|
||||
|
||||
@ -13,9 +13,9 @@ import (
|
||||
protoio "github.com/cosmos/gogoproto/io"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
"cosmossdk.io/store/v2/snapshots"
|
||||
snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
|
||||
)
|
||||
@ -109,7 +109,7 @@ type mockCommitSnapshotter struct {
|
||||
}
|
||||
|
||||
func (m *mockCommitSnapshotter) Restore(
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *store.KVPair,
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
|
||||
) (snapshotstypes.SnapshotItem, error) {
|
||||
if format == 0 {
|
||||
return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat
|
||||
@ -157,7 +157,7 @@ func (m *mockCommitSnapshotter) SupportedFormats() []uint32 {
|
||||
|
||||
type mockStorageSnapshotter struct{}
|
||||
|
||||
func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *store.KVPair) error {
|
||||
func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -170,7 +170,7 @@ func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio
|
||||
}
|
||||
|
||||
func (m *mockErrorCommitSnapshotter) Restore(
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *store.KVPair,
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
|
||||
) (snapshotstypes.SnapshotItem, error) {
|
||||
return snapshotstypes.SnapshotItem{}, errors.New("mock restore error")
|
||||
}
|
||||
@ -234,7 +234,7 @@ func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writ
|
||||
}
|
||||
|
||||
func (m *hungCommitSnapshotter) Restore(
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *store.KVPair,
|
||||
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
|
||||
) (snapshotstypes.SnapshotItem, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
@ -11,9 +11,9 @@ import (
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
errorsmod "cosmossdk.io/errors"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
storeerrors "cosmossdk.io/store/v2/errors"
|
||||
"cosmossdk.io/store/v2/snapshots/types"
|
||||
)
|
||||
@ -392,7 +392,7 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
|
||||
}
|
||||
|
||||
// chStorage is the channel to pass the KV pairs to the storage snapshotter.
|
||||
chStorage := make(chan *store.KVPair, defaultStorageChannelBufferSize)
|
||||
chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize)
|
||||
defer close(chStorage)
|
||||
|
||||
storageErrs := make(chan error, 1)
|
||||
|
||||
@ -3,7 +3,7 @@ package snapshots
|
||||
import (
|
||||
protoio "github.com/cosmos/gogoproto/io"
|
||||
|
||||
"cosmossdk.io/store/v2"
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/store/v2/snapshots/types"
|
||||
)
|
||||
|
||||
@ -14,13 +14,13 @@ type CommitSnapshotter interface {
|
||||
Snapshot(version uint64, protoWriter protoio.Writer) error
|
||||
|
||||
// Restore restores the commitment state from the snapshot reader.
|
||||
Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *store.KVPair) (types.SnapshotItem, error)
|
||||
Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (types.SnapshotItem, error)
|
||||
}
|
||||
|
||||
// StorageSnapshotter defines an API for restoring snapshots of the storage state.
|
||||
type StorageSnapshotter interface {
|
||||
// Restore restores the storage state from the given channel.
|
||||
Restore(version uint64, chStorage <-chan *store.KVPair) error
|
||||
Restore(version uint64, chStorage <-chan *corestore.StateChanges) error
|
||||
}
|
||||
|
||||
// ExtensionPayloadReader read extension payloads,
|
||||
|
||||
@ -12,13 +12,13 @@ import (
|
||||
// backends, such as restoring snapshots.
|
||||
type Database interface {
|
||||
NewBatch(version uint64) (store.Batch, error)
|
||||
Has(storeKey string, version uint64, key []byte) (bool, error)
|
||||
Get(storeKey string, version uint64, key []byte) ([]byte, error)
|
||||
Has(storeKey []byte, version uint64, key []byte) (bool, error)
|
||||
Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
|
||||
GetLatestVersion() (uint64, error)
|
||||
SetLatestVersion(version uint64) error
|
||||
|
||||
Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
|
||||
|
||||
Prune(version uint64) error
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ func (b *Batch) Reset() {
|
||||
b.batch.Reset()
|
||||
}
|
||||
|
||||
func (b *Batch) set(storeKey string, tombstone uint64, key, value []byte) error {
|
||||
func (b *Batch) set(storeKey []byte, tombstone uint64, key, value []byte) error {
|
||||
prefixedKey := MVCCEncode(prependStoreKey(storeKey, key), b.version)
|
||||
prefixedVal := MVCCEncode(value, tombstone)
|
||||
|
||||
@ -56,11 +56,11 @@ func (b *Batch) set(storeKey string, tombstone uint64, key, value []byte) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) Set(storeKey string, key, value []byte) error {
|
||||
func (b *Batch) Set(storeKey []byte, key, value []byte) error {
|
||||
return b.set(storeKey, 0, key, value)
|
||||
}
|
||||
|
||||
func (b *Batch) Delete(storeKey string, key []byte) error {
|
||||
func (b *Batch) Delete(storeKey []byte, key []byte) error {
|
||||
return b.set(storeKey, b.version, key, []byte(tombstoneVal))
|
||||
}
|
||||
|
||||
|
||||
@ -138,7 +138,7 @@ func (db *Database) setPruneHeight(pruneVersion uint64) error {
|
||||
return db.storage.Set([]byte(pruneHeightKey), ts[:], &pebble.WriteOptions{Sync: db.sync})
|
||||
}
|
||||
|
||||
func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, error) {
|
||||
func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
|
||||
val, err := db.Get(storeKey, version, key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@ -147,7 +147,7 @@ func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, erro
|
||||
return val != nil, nil
|
||||
}
|
||||
|
||||
func (db *Database) Get(storeKey string, targetVersion uint64, key []byte) ([]byte, error) {
|
||||
func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) {
|
||||
if targetVersion < db.earliestVersion {
|
||||
return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion}
|
||||
}
|
||||
@ -267,7 +267,7 @@ func (db *Database) Prune(version uint64) error {
|
||||
return db.setPruneHeight(version)
|
||||
}
|
||||
|
||||
func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, storeerrors.ErrKeyEmpty
|
||||
}
|
||||
@ -291,7 +291,7 @@ func (db *Database) Iterator(storeKey string, version uint64, start, end []byte)
|
||||
return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, false), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, storeerrors.ErrKeyEmpty
|
||||
}
|
||||
@ -315,11 +315,11 @@ func (db *Database) ReverseIterator(storeKey string, version uint64, start, end
|
||||
return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, true), nil
|
||||
}
|
||||
|
||||
func storePrefix(storeKey string) []byte {
|
||||
return []byte(fmt.Sprintf(StorePrefixTpl, storeKey))
|
||||
func storePrefix(storeKey []byte) []byte {
|
||||
return append([]byte(StorePrefixTpl), storeKey...)
|
||||
}
|
||||
|
||||
func prependStoreKey(storeKey string, key []byte) []byte {
|
||||
func prependStoreKey(storeKey []byte, key []byte) []byte {
|
||||
return append(storePrefix(storeKey), key...)
|
||||
}
|
||||
|
||||
@ -362,7 +362,7 @@ func valTombstoned(value []byte) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func getMVCCSlice(db *pebble.DB, storeKey string, key []byte, version uint64) ([]byte, error) {
|
||||
func getMVCCSlice(db *pebble.DB, storeKey []byte, key []byte, version uint64) ([]byte, error) {
|
||||
// end domain is exclusive, so we need to increment the version by 1
|
||||
if version < math.MaxUint64 {
|
||||
version++
|
||||
|
||||
@ -48,13 +48,13 @@ func (b Batch) Reset() {
|
||||
b.batch.Clear()
|
||||
}
|
||||
|
||||
func (b Batch) Set(storeKey string, key, value []byte) error {
|
||||
func (b Batch) Set(storeKey []byte, key, value []byte) error {
|
||||
prefixedKey := prependStoreKey(storeKey, key)
|
||||
b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b Batch) Delete(storeKey string, key []byte) error {
|
||||
func (b Batch) Delete(storeKey []byte, key []byte) error {
|
||||
prefixedKey := prependStoreKey(storeKey, key)
|
||||
b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:])
|
||||
return nil
|
||||
|
||||
@ -98,7 +98,7 @@ func (db *Database) NewBatch(version uint64) (store.Batch, error) {
|
||||
return NewBatch(db, version), nil
|
||||
}
|
||||
|
||||
func (db *Database) getSlice(storeKey string, version uint64, key []byte) (*grocksdb.Slice, error) {
|
||||
func (db *Database) getSlice(storeKey []byte, version uint64, key []byte) (*grocksdb.Slice, error) {
|
||||
if version < db.tsLow {
|
||||
return nil, errors.ErrVersionPruned{EarliestVersion: db.tsLow}
|
||||
}
|
||||
@ -131,7 +131,7 @@ func (db *Database) GetLatestVersion() (uint64, error) {
|
||||
return binary.LittleEndian.Uint64(bz), nil
|
||||
}
|
||||
|
||||
func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, error) {
|
||||
func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
|
||||
slice, err := db.getSlice(storeKey, version, key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@ -140,7 +140,7 @@ func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, erro
|
||||
return slice.Exists(), nil
|
||||
}
|
||||
|
||||
func (db *Database) Get(storeKey string, version uint64, key []byte) ([]byte, error) {
|
||||
func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
|
||||
slice, err := db.getSlice(storeKey, version, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get RocksDB slice: %w", err)
|
||||
@ -165,7 +165,7 @@ func (db *Database) Prune(version uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, errors.ErrKeyEmpty
|
||||
}
|
||||
@ -181,7 +181,7 @@ func (db *Database) Iterator(storeKey string, version uint64, start, end []byte)
|
||||
return newRocksDBIterator(itr, prefix, start, end, false), nil
|
||||
}
|
||||
|
||||
func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, errors.ErrKeyEmpty
|
||||
}
|
||||
@ -208,11 +208,11 @@ func newTSReadOptions(version uint64) *grocksdb.ReadOptions {
|
||||
return readOpts
|
||||
}
|
||||
|
||||
func storePrefix(storeKey string) []byte {
|
||||
return []byte(fmt.Sprintf(StorePrefixTpl, storeKey))
|
||||
func storePrefix(storeKey []byte) []byte {
|
||||
return append([]byte(StorePrefixTpl), storeKey...)
|
||||
}
|
||||
|
||||
func prependStoreKey(storeKey string, key []byte) []byte {
|
||||
func prependStoreKey(storeKey []byte, key []byte) []byte {
|
||||
return append(storePrefix(storeKey), key...)
|
||||
}
|
||||
|
||||
|
||||
@ -15,8 +15,8 @@ import (
|
||||
"cosmossdk.io/store/v2/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
storeKey1 = "store1"
|
||||
var (
|
||||
storeKey1 = []byte("store1")
|
||||
)
|
||||
|
||||
func TestStorageTestSuite(t *testing.T) {
|
||||
|
||||
@ -18,7 +18,7 @@ const (
|
||||
|
||||
type batchOp struct {
|
||||
action batchAction
|
||||
storeKey string
|
||||
storeKey []byte
|
||||
key, value []byte
|
||||
}
|
||||
|
||||
@ -52,13 +52,13 @@ func (b *Batch) Reset() {
|
||||
b.size = 0
|
||||
}
|
||||
|
||||
func (b *Batch) Set(storeKey string, key, value []byte) error {
|
||||
func (b *Batch) Set(storeKey []byte, key, value []byte) error {
|
||||
b.size += len(key) + len(value)
|
||||
b.ops = append(b.ops, batchOp{action: batchActionSet, storeKey: storeKey, key: key, value: value})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Batch) Delete(storeKey string, key []byte) error {
|
||||
func (b *Batch) Delete(storeKey []byte, key []byte) error {
|
||||
b.size += len(key)
|
||||
b.ops = append(b.ops, batchOp{action: batchActionDel, storeKey: storeKey, key: key})
|
||||
return nil
|
||||
|
||||
@ -128,7 +128,7 @@ func (db *Database) SetLatestVersion(version uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, error) {
|
||||
func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
|
||||
val, err := db.Get(storeKey, version, key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@ -137,7 +137,7 @@ func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, erro
|
||||
return val != nil, nil
|
||||
}
|
||||
|
||||
func (db *Database) Get(storeKey string, targetVersion uint64, key []byte) ([]byte, error) {
|
||||
func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) {
|
||||
if targetVersion < db.earliestVersion {
|
||||
return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion}
|
||||
}
|
||||
@ -216,7 +216,7 @@ func (db *Database) Prune(version uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, storeerrors.ErrKeyEmpty
|
||||
}
|
||||
@ -228,7 +228,7 @@ func (db *Database) Iterator(storeKey string, version uint64, start, end []byte)
|
||||
return newIterator(db, storeKey, version, start, end, false)
|
||||
}
|
||||
|
||||
func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
|
||||
return nil, storeerrors.ErrKeyEmpty
|
||||
}
|
||||
@ -256,7 +256,7 @@ func (db *Database) PrintRowsDebug() {
|
||||
var sb strings.Builder
|
||||
for rows.Next() {
|
||||
var (
|
||||
storeKey string
|
||||
storeKey []byte
|
||||
key []byte
|
||||
value []byte
|
||||
version uint64
|
||||
|
||||
@ -13,8 +13,8 @@ import (
|
||||
"cosmossdk.io/store/v2/storage"
|
||||
)
|
||||
|
||||
const (
|
||||
storeKey1 = "store1"
|
||||
var (
|
||||
storeKey1 = []byte("store1")
|
||||
)
|
||||
|
||||
func TestStorageTestSuite(t *testing.T) {
|
||||
|
||||
@ -21,7 +21,7 @@ type iterator struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func newIterator(db *Database, storeKey string, targetVersion uint64, start, end []byte, reverse bool) (*iterator, error) {
|
||||
func newIterator(db *Database, storeKey []byte, targetVersion uint64, start, end []byte, reverse bool) (*iterator, error) {
|
||||
if targetVersion < db.earliestVersion {
|
||||
return &iterator{
|
||||
start: start,
|
||||
|
||||
@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/log"
|
||||
"cosmossdk.io/store/v2"
|
||||
"cosmossdk.io/store/v2/storage"
|
||||
@ -20,8 +21,8 @@ import (
|
||||
"cosmossdk.io/store/v2/storage/sqlite"
|
||||
)
|
||||
|
||||
const (
|
||||
storeKey1 = "store1"
|
||||
var (
|
||||
storeKey1 = []byte("store1")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -70,9 +71,9 @@ func BenchmarkGet(b *testing.B) {
|
||||
_ = db.Close()
|
||||
}()
|
||||
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}})
|
||||
for i := 0; i < numKeyVals; i++ {
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: keys[i], Value: vals[i]})
|
||||
cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]})
|
||||
}
|
||||
|
||||
require.NoError(b, db.ApplyChangeset(1, cs))
|
||||
@ -106,7 +107,7 @@ func BenchmarkApplyChangeset(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.StopTimer()
|
||||
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}})
|
||||
for j := 0; j < 1000; j++ {
|
||||
key := make([]byte, 128)
|
||||
val := make([]byte, 128)
|
||||
@ -116,7 +117,7 @@ func BenchmarkApplyChangeset(b *testing.B) {
|
||||
_, err = rng.Read(val)
|
||||
require.NoError(b, err)
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: key, Value: val})
|
||||
cs.AddKVPair(storeKey1, corestore.KVPair{Key: key, Value: val})
|
||||
}
|
||||
|
||||
b.StartTimer()
|
||||
@ -153,9 +154,9 @@ func BenchmarkIterate(b *testing.B) {
|
||||
|
||||
b.StopTimer()
|
||||
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}})
|
||||
for i := 0; i < numKeyVals; i++ {
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: keys[i], Value: vals[i]})
|
||||
cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]})
|
||||
}
|
||||
|
||||
require.NoError(b, db.ApplyChangeset(1, cs))
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
corestore "cosmossdk.io/core/store"
|
||||
"cosmossdk.io/store/v2"
|
||||
)
|
||||
|
||||
@ -15,6 +16,8 @@ const (
|
||||
storeKey1 = "store1"
|
||||
)
|
||||
|
||||
var storeKey1Bytes = []byte(storeKey1)
|
||||
|
||||
// StorageTestSuite defines a reusable test suite for all storage backends.
|
||||
type StorageTestSuite struct {
|
||||
suite.Suite
|
||||
@ -58,15 +61,15 @@ func (s *StorageTestSuite) TestDatabase_VersionedKeys() {
|
||||
defer db.Close()
|
||||
|
||||
for i := uint64(1); i <= 100; i++ {
|
||||
s.Require().NoError(db.ApplyChangeset(i, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
s.Require().NoError(db.ApplyChangeset(i, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {{Key: []byte("key"), Value: []byte(fmt.Sprintf("value%03d", i))}},
|
||||
},
|
||||
)))
|
||||
}
|
||||
|
||||
for i := uint64(1); i <= 100; i++ {
|
||||
bz, err := db.Get(storeKey1, i, []byte("key"))
|
||||
bz, err := db.Get(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(fmt.Sprintf("value%03d", i), string(bz))
|
||||
}
|
||||
@ -78,69 +81,69 @@ func (s *StorageTestSuite) TestDatabase_GetVersionedKey() {
|
||||
defer db.Close()
|
||||
|
||||
// store a key at version 1
|
||||
s.Require().NoError(db.ApplyChangeset(1, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {{Key: []byte("key"), Value: []byte("value001")}},
|
||||
},
|
||||
)))
|
||||
|
||||
// assume chain progresses to version 10 w/o any changes to key
|
||||
bz, err := db.Get(storeKey1, 10, []byte("key"))
|
||||
bz, err := db.Get(storeKey1Bytes, 10, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("value001"), bz)
|
||||
|
||||
ok, err := db.Has(storeKey1, 10, []byte("key"))
|
||||
ok, err := db.Has(storeKey1Bytes, 10, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().True(ok)
|
||||
|
||||
// chain progresses to version 11 with an update to key
|
||||
s.Require().NoError(db.ApplyChangeset(11, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
s.Require().NoError(db.ApplyChangeset(11, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {{Key: []byte("key"), Value: []byte("value011")}},
|
||||
},
|
||||
)))
|
||||
|
||||
bz, err = db.Get(storeKey1, 10, []byte("key"))
|
||||
bz, err = db.Get(storeKey1Bytes, 10, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("value001"), bz)
|
||||
|
||||
ok, err = db.Has(storeKey1, 10, []byte("key"))
|
||||
ok, err = db.Has(storeKey1Bytes, 10, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().True(ok)
|
||||
|
||||
for i := uint64(11); i <= 14; i++ {
|
||||
bz, err = db.Get(storeKey1, i, []byte("key"))
|
||||
bz, err = db.Get(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("value011"), bz)
|
||||
|
||||
ok, err = db.Has(storeKey1, i, []byte("key"))
|
||||
ok, err = db.Has(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().True(ok)
|
||||
}
|
||||
|
||||
// chain progresses to version 15 with a delete to key
|
||||
s.Require().NoError(db.ApplyChangeset(15, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{storeKey1: {{Key: []byte("key")}}},
|
||||
s.Require().NoError(db.ApplyChangeset(15, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{storeKey1: {{Key: []byte("key"), Remove: true}}},
|
||||
)))
|
||||
|
||||
// all queries up to version 14 should return the latest value
|
||||
for i := uint64(1); i <= 14; i++ {
|
||||
bz, err = db.Get(storeKey1, i, []byte("key"))
|
||||
bz, err = db.Get(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(bz)
|
||||
|
||||
ok, err = db.Has(storeKey1, i, []byte("key"))
|
||||
ok, err = db.Has(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().True(ok)
|
||||
}
|
||||
|
||||
// all queries after version 15 should return nil
|
||||
for i := uint64(15); i <= 17; i++ {
|
||||
bz, err = db.Get(storeKey1, i, []byte("key"))
|
||||
bz, err = db.Get(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().Nil(bz)
|
||||
|
||||
ok, err = db.Has(storeKey1, i, []byte("key"))
|
||||
ok, err = db.Has(storeKey1Bytes, i, []byte("key"))
|
||||
s.Require().NoError(err)
|
||||
s.Require().False(ok)
|
||||
}
|
||||
@ -151,14 +154,14 @@ func (s *StorageTestSuite) TestDatabase_ApplyChangeset() {
|
||||
s.Require().NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 100; i++ {
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")})
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if i%10 == 0 {
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(fmt.Sprintf("key%03d", i))})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Remove: true})
|
||||
}
|
||||
}
|
||||
|
||||
@ -169,7 +172,7 @@ func (s *StorageTestSuite) TestDatabase_ApplyChangeset() {
|
||||
s.Require().Equal(uint64(1), lv)
|
||||
|
||||
for i := 0; i < 1; i++ {
|
||||
ok, err := db.Has(storeKey1, 1, []byte(fmt.Sprintf("key%03d", i)))
|
||||
ok, err := db.Has(storeKey1Bytes, 1, []byte(fmt.Sprintf("key%03d", i)))
|
||||
s.Require().NoError(err)
|
||||
|
||||
if i%10 == 0 {
|
||||
@ -185,7 +188,7 @@ func (s *StorageTestSuite) TestDatabase_IteratorEmptyDomain() {
|
||||
s.Require().NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
iter, err := db.Iterator(storeKey1, 1, []byte{}, []byte{})
|
||||
iter, err := db.Iterator(storeKey1Bytes, 1, []byte{}, []byte{})
|
||||
s.Require().Error(err)
|
||||
s.Require().Nil(iter)
|
||||
}
|
||||
@ -195,7 +198,7 @@ func (s *StorageTestSuite) TestDatabase_IteratorClose() {
|
||||
s.Require().NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
iter, err := db.Iterator(storeKey1, 1, []byte("key000"), nil)
|
||||
iter, err := db.Iterator(storeKey1Bytes, 1, []byte("key000"), nil)
|
||||
s.Require().NoError(err)
|
||||
iter.Close()
|
||||
|
||||
@ -221,7 +224,7 @@ func (s *StorageTestSuite) TestDatabase_IteratorDomain() {
|
||||
|
||||
for name, tc := range testCases {
|
||||
s.Run(name, func() {
|
||||
iter, err := db.Iterator(storeKey1, 1, tc.start, tc.end)
|
||||
iter, err := db.Iterator(storeKey1Bytes, 1, tc.start, tc.end)
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer iter.Close()
|
||||
@ -238,19 +241,19 @@ func (s *StorageTestSuite) TestDatabase_Iterator() {
|
||||
s.Require().NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 100; i++ {
|
||||
key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
|
||||
val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false})
|
||||
}
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(1, cs))
|
||||
|
||||
// iterator without an end key over multiple versions
|
||||
for v := uint64(1); v < 5; v++ {
|
||||
itr, err := db.Iterator(storeKey1, v, []byte("key000"), nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, v, []byte("key000"), nil)
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -263,8 +266,8 @@ func (s *StorageTestSuite) TestDatabase_Iterator() {
|
||||
i++
|
||||
count++
|
||||
}
|
||||
s.Require().Equal(100, count)
|
||||
s.Require().NoError(itr.Error())
|
||||
s.Require().Equal(100, count)
|
||||
|
||||
// seek past domain, which should make the iterator invalid and produce an error
|
||||
s.Require().False(itr.Valid())
|
||||
@ -272,7 +275,7 @@ func (s *StorageTestSuite) TestDatabase_Iterator() {
|
||||
|
||||
// iterator with with a start and end domain over multiple versions
|
||||
for v := uint64(1); v < 5; v++ {
|
||||
itr2, err := db.Iterator(storeKey1, v, []byte("key010"), []byte("key019"))
|
||||
itr2, err := db.Iterator(storeKey1Bytes, v, []byte("key010"), []byte("key019"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr2.Close()
|
||||
@ -293,7 +296,7 @@ func (s *StorageTestSuite) TestDatabase_Iterator() {
|
||||
}
|
||||
|
||||
// start must be <= end
|
||||
iter3, err := db.Iterator(storeKey1, 1, []byte("key020"), []byte("key019"))
|
||||
iter3, err := db.Iterator(storeKey1Bytes, 1, []byte("key020"), []byte("key019"))
|
||||
s.Require().Error(err)
|
||||
s.Require().Nil(iter3)
|
||||
}
|
||||
@ -303,28 +306,28 @@ func (s *StorageTestSuite) TestDatabase_Iterator_RangedDeletes() {
|
||||
s.Require().NoError(err)
|
||||
defer db.Close()
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(1, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {
|
||||
{Key: []byte("key001"), Value: []byte("value001")},
|
||||
{Key: []byte("key002"), Value: []byte("value001")},
|
||||
{Key: []byte("key001"), Value: []byte("value001"), Remove: false},
|
||||
{Key: []byte("key002"), Value: []byte("value001"), Remove: false},
|
||||
},
|
||||
},
|
||||
)))
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(5, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
storeKey1: {{Key: []byte("key002"), Value: []byte("value002")}},
|
||||
s.Require().NoError(db.ApplyChangeset(5, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {{Key: []byte("key002"), Value: []byte("value002"), Remove: false}},
|
||||
},
|
||||
)))
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(10, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{
|
||||
storeKey1: {{Key: []byte("key002")}},
|
||||
s.Require().NoError(db.ApplyChangeset(10, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{
|
||||
storeKey1: {{Key: []byte("key002"), Remove: true}},
|
||||
},
|
||||
)))
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 11, []byte("key001"), nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 11, []byte("key001"), nil)
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -346,12 +349,12 @@ func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() {
|
||||
|
||||
// for versions 1-49, set all 10 keys
|
||||
for v := uint64(1); v < 50; v++ {
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 10; i++ {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
val := fmt.Sprintf("val%03d-%03d", i, v)
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
}
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(v, cs))
|
||||
@ -359,20 +362,20 @@ func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() {
|
||||
|
||||
// for versions 50-100, only update even keys
|
||||
for v := uint64(50); v <= 100; v++ {
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 10; i++ {
|
||||
if i%2 == 0 {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
val := fmt.Sprintf("val%03d-%03d", i, v)
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false})
|
||||
}
|
||||
}
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(v, cs))
|
||||
}
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 69, []byte("key000"), nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 69, []byte("key000"), nil)
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -394,8 +397,8 @@ func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() {
|
||||
count++
|
||||
}
|
||||
|
||||
s.Require().Equal(10, count)
|
||||
s.Require().NoError(itr.Error())
|
||||
s.Require().Equal(10, count)
|
||||
}
|
||||
|
||||
func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() {
|
||||
@ -410,7 +413,7 @@ func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() {
|
||||
DBApplyChangeset(s.T(), db, 58833605, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value004")})
|
||||
DBApplyChangeset(s.T(), db, 58833606, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value006")})
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 58831525, []byte("key"), nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 58831525, []byte("key"), nil)
|
||||
s.Require().NoError(err)
|
||||
defer itr.Close()
|
||||
|
||||
@ -419,6 +422,7 @@ func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() {
|
||||
count[string(itr.Key())] = struct{}{}
|
||||
}
|
||||
|
||||
s.Require().NoError(itr.Error())
|
||||
s.Require().Equal(3, len(count))
|
||||
}
|
||||
|
||||
@ -437,7 +441,7 @@ func (s *StorageTestSuite) TestDatabaseIterator_ForwardIteration() {
|
||||
DBApplyChangeset(s.T(), db, 4, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")})
|
||||
DBApplyChangeset(s.T(), db, 5, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")})
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 6, nil, []byte("keyZ"))
|
||||
itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -446,6 +450,7 @@ func (s *StorageTestSuite) TestDatabaseIterator_ForwardIteration() {
|
||||
count++
|
||||
}
|
||||
|
||||
s.Require().NoError(itr.Error())
|
||||
s.Require().Equal(4, count)
|
||||
}
|
||||
|
||||
@ -463,7 +468,7 @@ func (s *StorageTestSuite) TestDatabaseIterator_ForwardIterationHigher() {
|
||||
DBApplyChangeset(s.T(), db, 14, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")})
|
||||
DBApplyChangeset(s.T(), db, 15, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")})
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 6, nil, []byte("keyZ"))
|
||||
itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -483,19 +488,19 @@ func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() {
|
||||
|
||||
// for versions 1-50, set all 10 keys
|
||||
for v := uint64(1); v <= 50; v++ {
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 10; i++ {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
val := fmt.Sprintf("val%03d-%03d", i, v)
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false})
|
||||
}
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(v, cs))
|
||||
}
|
||||
|
||||
// create an iterator over the entire domain
|
||||
itr, err := db.Iterator(storeKey1, 50, nil, nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 50, nil, nil)
|
||||
s.Require().NoError(err)
|
||||
|
||||
defer itr.Close()
|
||||
@ -508,8 +513,8 @@ func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() {
|
||||
i++
|
||||
count++
|
||||
}
|
||||
s.Require().Equal(10, count)
|
||||
s.Require().NoError(itr.Error())
|
||||
s.Require().Equal(10, count)
|
||||
}
|
||||
|
||||
func (s *StorageTestSuite) TestDatabase_Prune() {
|
||||
@ -523,12 +528,12 @@ func (s *StorageTestSuite) TestDatabase_Prune() {
|
||||
|
||||
// for versions 1-50, set 10 keys
|
||||
for v := uint64(1); v <= 50; v++ {
|
||||
cs := store.NewChangesetWithPairs(map[string]store.KVPairs{storeKey1: {}})
|
||||
cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}})
|
||||
for i := 0; i < 10; i++ {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
val := fmt.Sprintf("val%03d-%03d", i, v)
|
||||
|
||||
cs.AddKVPair(storeKey1, store.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)})
|
||||
}
|
||||
|
||||
s.Require().NoError(db.ApplyChangeset(v, cs))
|
||||
@ -548,7 +553,7 @@ func (s *StorageTestSuite) TestDatabase_Prune() {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
val := fmt.Sprintf("val%03d-%03d", i, v)
|
||||
|
||||
bz, err := db.Get(storeKey1, v, []byte(key))
|
||||
bz, err := db.Get(storeKey1Bytes, v, []byte(key))
|
||||
if v <= 25 {
|
||||
s.Require().Error(err)
|
||||
s.Require().Nil(bz)
|
||||
@ -559,7 +564,7 @@ func (s *StorageTestSuite) TestDatabase_Prune() {
|
||||
}
|
||||
}
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 25, []byte("key000"), nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 25, []byte("key000"), nil)
|
||||
s.Require().NoError(err)
|
||||
s.Require().False(itr.Valid())
|
||||
|
||||
@ -570,7 +575,7 @@ func (s *StorageTestSuite) TestDatabase_Prune() {
|
||||
for i := 0; i < 10; i++ {
|
||||
key := fmt.Sprintf("key%03d", i)
|
||||
|
||||
bz, err := db.Get(storeKey1, v, []byte(key))
|
||||
bz, err := db.Get(storeKey1Bytes, v, []byte(key))
|
||||
s.Require().Error(err)
|
||||
s.Require().Nil(bz)
|
||||
}
|
||||
@ -589,44 +594,44 @@ func (s *StorageTestSuite) TestDatabase_Prune_KeepRecent() {
|
||||
key := []byte("key")
|
||||
|
||||
// write a key at three different versions
|
||||
s.Require().NoError(db.ApplyChangeset(1, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{storeKey1: {{Key: key, Value: []byte("val001")}}},
|
||||
s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val001"), Remove: false}}},
|
||||
)))
|
||||
s.Require().NoError(db.ApplyChangeset(100, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{storeKey1: {{Key: key, Value: []byte("val100")}}},
|
||||
s.Require().NoError(db.ApplyChangeset(100, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val100"), Remove: false}}},
|
||||
)))
|
||||
s.Require().NoError(db.ApplyChangeset(200, store.NewChangesetWithPairs(
|
||||
map[string]store.KVPairs{storeKey1: {{Key: key, Value: []byte("val200")}}},
|
||||
s.Require().NoError(db.ApplyChangeset(200, corestore.NewChangesetWithPairs(
|
||||
map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val200"), Remove: false}}},
|
||||
)))
|
||||
|
||||
// prune version 50
|
||||
s.Require().NoError(db.Prune(50))
|
||||
|
||||
// ensure queries for versions 50 and older return nil
|
||||
bz, err := db.Get(storeKey1, 49, key)
|
||||
bz, err := db.Get(storeKey1Bytes, 49, key)
|
||||
s.Require().Error(err)
|
||||
s.Require().Nil(bz)
|
||||
|
||||
itr, err := db.Iterator(storeKey1, 49, nil, nil)
|
||||
itr, err := db.Iterator(storeKey1Bytes, 49, nil, nil)
|
||||
s.Require().NoError(err)
|
||||
s.Require().False(itr.Valid())
|
||||
|
||||
defer itr.Close()
|
||||
|
||||
// ensure the value previously at version 1 is still there for queries greater than 50
|
||||
bz, err = db.Get(storeKey1, 51, key)
|
||||
bz, err = db.Get(storeKey1Bytes, 51, key)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("val001"), bz)
|
||||
|
||||
// ensure the correct value at a greater height
|
||||
bz, err = db.Get(storeKey1, 200, key)
|
||||
bz, err = db.Get(storeKey1Bytes, 200, key)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("val200"), bz)
|
||||
|
||||
// prune latest height and ensure we have the previous version when querying above it
|
||||
s.Require().NoError(db.Prune(200))
|
||||
|
||||
bz, err = db.Get(storeKey1, 201, key)
|
||||
bz, err = db.Get(storeKey1Bytes, 201, key)
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal([]byte("val200"), bz)
|
||||
}
|
||||
@ -643,9 +648,14 @@ func DBApplyChangeset(
|
||||
require.Greater(t, version, uint64(0))
|
||||
require.Equal(t, len(keys), len(vals))
|
||||
|
||||
cs := store.NewChangeset()
|
||||
cs := corestore.NewChangeset()
|
||||
for i := 0; i < len(keys); i++ {
|
||||
cs.AddKVPair(storeKey, store.KVPair{Key: keys[i], Value: vals[i]})
|
||||
remove := false
|
||||
if vals[i] == nil {
|
||||
remove = true
|
||||
}
|
||||
|
||||
cs.AddKVPair([]byte(storeKey), corestore.KVPair{Key: keys[i], Value: vals[i], Remove: remove})
|
||||
}
|
||||
|
||||
require.NoError(t, db.ApplyChangeset(version, cs))
|
||||
|
||||
@ -42,30 +42,30 @@ func NewStorageStore(db Database, pruneOpts *store.PruneOptions, logger log.Logg
|
||||
}
|
||||
|
||||
// Has returns true if the key exists in the store.
|
||||
func (ss *StorageStore) Has(storeKey string, version uint64, key []byte) (bool, error) {
|
||||
func (ss *StorageStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
|
||||
return ss.db.Has(storeKey, version, key)
|
||||
}
|
||||
|
||||
// Get returns the value associated with the given key.
|
||||
func (ss *StorageStore) Get(storeKey string, version uint64, key []byte) ([]byte, error) {
|
||||
func (ss *StorageStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
|
||||
return ss.db.Get(storeKey, version, key)
|
||||
}
|
||||
|
||||
// ApplyChangeset applies the given changeset to the storage.
|
||||
func (ss *StorageStore) ApplyChangeset(version uint64, cs *store.Changeset) error {
|
||||
func (ss *StorageStore) ApplyChangeset(version uint64, cs *corestore.Changeset) error {
|
||||
b, err := ss.db.NewBatch(version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for storeKey, pairs := range cs.Pairs {
|
||||
for _, kvPair := range pairs {
|
||||
if kvPair.Value == nil {
|
||||
if err := b.Delete(storeKey, kvPair.Key); err != nil {
|
||||
for _, pairs := range cs.Changes {
|
||||
for _, kvPair := range pairs.StateChanges {
|
||||
if kvPair.Remove {
|
||||
if err := b.Delete(pairs.Actor, kvPair.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := b.Set(storeKey, kvPair.Key, kvPair.Value); err != nil {
|
||||
if err := b.Set(pairs.Actor, kvPair.Key, kvPair.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -96,12 +96,12 @@ func (ss *StorageStore) SetLatestVersion(version uint64) error {
|
||||
}
|
||||
|
||||
// Iterator returns an iterator over the specified domain and prefix.
|
||||
func (ss *StorageStore) Iterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (ss *StorageStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
return ss.db.Iterator(storeKey, version, start, end)
|
||||
}
|
||||
|
||||
// ReverseIterator returns an iterator over the specified domain and prefix in reverse.
|
||||
func (ss *StorageStore) ReverseIterator(storeKey string, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
func (ss *StorageStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
|
||||
return ss.db.ReverseIterator(storeKey, version, start, end)
|
||||
}
|
||||
|
||||
@ -111,7 +111,7 @@ func (ss *StorageStore) Prune(version uint64) error {
|
||||
}
|
||||
|
||||
// Restore restores the store from the given channel.
|
||||
func (ss *StorageStore) Restore(version uint64, chStorage <-chan *store.KVPair) error {
|
||||
func (ss *StorageStore) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error {
|
||||
latestVersion, err := ss.db.GetLatestVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest version: %w", err)
|
||||
@ -126,14 +126,16 @@ func (ss *StorageStore) Restore(version uint64, chStorage <-chan *store.KVPair)
|
||||
}
|
||||
|
||||
for kvPair := range chStorage {
|
||||
if err := b.Set(kvPair.StoreKey, kvPair.Key, kvPair.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Size() > defaultBatchBufferSize {
|
||||
if err := b.Write(); err != nil {
|
||||
for _, kv := range kvPair.StateChanges {
|
||||
if err := b.Set(kvPair.Actor, kv.Key, kv.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Size() > defaultBatchBufferSize {
|
||||
if err := b.Write(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -29,7 +29,7 @@ type RootStore interface {
|
||||
|
||||
// Query performs a query on the RootStore for a given store key, version (height),
|
||||
// and key tuple. Queries should be routed to the underlying SS engine.
|
||||
Query(storeKey string, version uint64, key []byte, prove bool) (QueryResult, error)
|
||||
Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error)
|
||||
|
||||
// LoadVersion loads the RootStore to the given version.
|
||||
LoadVersion(version uint64) error
|
||||
@ -55,7 +55,7 @@ type RootStore interface {
|
||||
// is responsible for writing the Changeset to the SC backend and returning the
|
||||
// resulting root hash. Then, Commit() would return this hash and flush writes
|
||||
// to disk.
|
||||
WorkingHash(cs *Changeset) ([]byte, error)
|
||||
WorkingHash(cs *corestore.Changeset) ([]byte, error)
|
||||
|
||||
// Commit should be responsible for taking the provided changeset and flushing
|
||||
// it to disk. Note, depending on the implementation, the changeset, at this
|
||||
@ -63,7 +63,7 @@ type RootStore interface {
|
||||
// the changeset is committed to all SC and SC backends and flushed to disk.
|
||||
// It must return a hash of the merkle-ized committed state. This hash should
|
||||
// be the same as the hash returned by WorkingHash() prior to calling Commit().
|
||||
Commit(cs *Changeset) ([]byte, error)
|
||||
Commit(cs *corestore.Changeset) ([]byte, error)
|
||||
|
||||
// LastCommitID returns a CommitID pertaining to the last commitment.
|
||||
LastCommitID() (proof.CommitID, error)
|
||||
|
||||
Loading…
Reference in New Issue
Block a user