refactor(store/v2)!: simplify storage (#22683)

commit 94cfcc11aa (parent 5426cd8f34)
@@ -134,6 +134,7 @@ func (a *AppBuilder[T]) initGenesis(ctx context.Context, src io.Reader, txHandle
 	if err != nil {
 		return nil, fmt.Errorf("failed to read import state: %w", err)
 	}

 	var genesisJSON map[string]json.RawMessage
 	if err = json.Unmarshal(bz, &genesisJSON); err != nil {
 		return nil, err
@@ -591,7 +591,7 @@ func TestConsensus_Query(t *testing.T) {
 	c := setUpConsensus(t, 100_000, cometmock.MockMempool[mock.Tx]{})

 	// Write data to state storage
-	err := c.store.GetStateStorage().ApplyChangeset(&store.Changeset{
+	err := c.store.GetStateCommitment().WriteChangeset(&store.Changeset{
 		Version: 1,
 		Changes: []store.StateChanges{
 			{
@@ -691,9 +691,8 @@ func setUpConsensus(t *testing.T, gasLimit uint64, mempool mempool.Mempool[mock.
 	)
 	require.NoError(t, err)

-	ss := cometmock.NewMockStorage(log.NewNopLogger(), t.TempDir())
 	sc := cometmock.NewMockCommiter(log.NewNopLogger(), string(actorName), "stf")
-	mockStore := cometmock.NewMockStore(ss, sc)
+	mockStore := cometmock.NewMockStore(sc)

 	am := appmanager.New(appmanager.Config{
 		ValidateTxGasLimit: gasLimit,
@@ -786,6 +785,7 @@ func TestOptimisticExecution(t *testing.T) {
 		Txs: ppReq.Txs,
 	}
 	fbResp, err := c.FinalizeBlock(context.Background(), fbReq)
 	require.Nil(t, fbResp)
 	require.Error(t, err)
 	require.ErrorContains(t, err, "test error") // from optimisticMockFunc
 	require.Equal(t, 1, calledTimes)
@@ -39,7 +39,7 @@ func NewMockReader(v uint64, rs *MockStore, actor []byte) *MockReader {
 }

 func (roa *MockReader) Has(key []byte) (bool, error) {
-	val, err := roa.store.GetStateStorage().Has(roa.actor, roa.version, key)
+	val, err := roa.store.GetStateCommitment().Has(roa.actor, roa.version, key)
 	if err != nil {
 		return false, err
 	}
@@ -48,7 +48,7 @@ func (roa *MockReader) Has(key []byte) (bool, error) {
 }

 func (roa *MockReader) Get(key []byte) ([]byte, error) {
-	result, err := roa.store.GetStateStorage().Get(roa.actor, roa.version, key)
+	result, err := roa.store.GetStateCommitment().Get(roa.actor, roa.version, key)
 	if err != nil {
 		return nil, err
 	}
@@ -57,9 +57,9 @@ func (roa *MockReader) Get(key []byte) ([]byte, error) {
 }

 func (roa *MockReader) Iterator(start, end []byte) (corestore.Iterator, error) {
-	return roa.store.GetStateStorage().Iterator(roa.actor, roa.version, start, end)
+	return roa.store.GetStateCommitment().Iterator(roa.actor, roa.version, start, end)
 }

 func (roa *MockReader) ReverseIterator(start, end []byte) (corestore.Iterator, error) {
-	return roa.store.GetStateStorage().ReverseIterator(roa.actor, roa.version, start, end)
+	return roa.store.GetStateCommitment().ReverseIterator(roa.actor, roa.version, start, end)
 }

@@ -11,21 +11,12 @@ import (
 	"cosmossdk.io/store/v2/commitment/iavl"
 	dbm "cosmossdk.io/store/v2/db"
 	"cosmossdk.io/store/v2/proof"
-	"cosmossdk.io/store/v2/storage"
-	"cosmossdk.io/store/v2/storage/pebbledb"
 )

 type MockStore struct {
-	Storage   storev2.VersionedWriter
 	Committer storev2.Committer
 }

-func NewMockStorage(logger log.Logger, dir string) storev2.VersionedWriter {
-	storageDB, _ := pebbledb.New(dir)
-	ss := storage.NewStorageStore(storageDB, logger)
-	return ss
-}
-
 func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer {
 	treeMap := make(map[string]commitment.Tree)
 	for _, actor := range actors {
@@ -36,8 +27,8 @@ func NewMockCommiter(logger log.Logger, actors ...string) storev2.Committer {
 	return sc
 }

-func NewMockStore(ss storev2.VersionedWriter, sc storev2.Committer) *MockStore {
-	return &MockStore{Storage: ss, Committer: sc}
+func NewMockStore(sc storev2.Committer) *MockStore {
+	return &MockStore{Committer: sc}
 }

 func (s *MockStore) GetLatestVersion() (uint64, error) {
@@ -59,12 +50,7 @@ func (s *MockStore) StateLatest() (uint64, corestore.ReaderMap, error) {
 }

 func (s *MockStore) Commit(changeset *corestore.Changeset) (corestore.Hash, error) {
-	err := s.Storage.ApplyChangeset(changeset)
-	if err != nil {
-		return []byte{}, err
-	}
-
-	err = s.Committer.WriteChangeset(changeset)
+	err := s.Committer.WriteChangeset(changeset)
 	if err != nil {
 		return []byte{}, err
 	}
@@ -81,10 +67,6 @@ func (s *MockStore) StateAt(version uint64) (corestore.ReaderMap, error) {
 	return NewMockReaderMap(version, s), nil
 }

-func (s *MockStore) GetStateStorage() storev2.VersionedWriter {
-	return s.Storage
-}
-
 func (s *MockStore) GetStateCommitment() storev2.Committer {
 	return s.Committer
 }

@@ -127,7 +127,6 @@ func New[T transaction.Tx](
 		indexEvents[e] = struct{}{}
 	}

-	ss := store.GetStateStorage().(snapshots.StorageSnapshotter)
 	sc := store.GetStateCommitment().(snapshots.CommitSnapshotter)

 	snapshotStore, err := GetSnapshotStore(srv.config.ConfigTomlConfig.RootDir)
@@ -155,7 +154,6 @@ func New[T transaction.Tx](
 		snapshotStore,
 		srv.serverOptions.SnapshotOptions(cfg),
 		sc,
-		ss,
 		nil, // extensions snapshotter registered below
 		logger,
 	)

@@ -105,6 +105,7 @@ func Benchmark_Iterate(b *testing.B) {

 // makeBranchStack creates a branch stack of the given size and initializes it with unique key-value pairs.
 func makeBranchStack(b *testing.B, stackSize int) Store[store.KVStore] {
+	b.Helper()
 	parent := coretesting.NewMemKV()
 	branch := NewStore[store.KVStore](parent)
 	for i := 1; i < stackSize; i++ {
@@ -375,10 +375,11 @@ func createSnapshotsManager(
 	}

 	sm := snapshots.NewManager(
-		snapshotStore, snapshots.NewSnapshotOptions(interval, uint32(keepRecent)),
+		snapshotStore,
+		snapshots.NewSnapshotOptions(interval, uint32(keepRecent)),
 		store.GetStateCommitment().(snapshots.CommitSnapshotter),
-		store.GetStateStorage().(snapshots.StorageSnapshotter),
-		nil, logger)
+		nil,
+		logger)
 	return sm, nil
 }
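With the state-storage snapshotter gone, `snapshots.NewManager` is wired with a single `CommitSnapshotter` plus the extensions slot. A minimal sketch of the new construction, derived from the call sites in this diff; the `*snapshots.Store` parameter type and the `cosmossdk.io/core/log` import path are assumptions:

```go
package example

import (
	"cosmossdk.io/core/log"
	storev2 "cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/snapshots"
)

// newSnapshotsManager mirrors the hunk above: only the commitment store is
// snapshotted, so no snapshots.StorageSnapshotter argument remains. The nil
// slot is for extension snapshotters registered later.
func newSnapshotsManager(
	store storev2.RootStore,
	snapshotStore *snapshots.Store,
	interval uint64,
	keepRecent uint32,
	logger log.Logger,
) *snapshots.Manager {
	return snapshots.NewManager(
		snapshotStore,
		snapshots.NewSnapshotOptions(interval, keepRecent),
		store.GetStateCommitment().(snapshots.CommitSnapshotter),
		nil, // extension snapshotters can be registered afterwards
		logger,
	)
}
```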
@@ -79,8 +79,6 @@ func TestLoadStore(t *testing.T) {
 	cIDHp := types.CommitID{Version: verHp, Hash: hash}
 	require.Nil(t, err)

-	// TODO: Prune this height
-
 	// Create current height Hc
 	updated, err = tree.Set([]byte("hello"), []byte("ciao"))
 	require.NoError(t, err)
@@ -83,6 +83,16 @@ func (t *IavlTree) Commit() ([]byte, uint64, error) {

 // GetProof returns a proof for the given key and version.
 func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
+	// the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed
+	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.GetProof(key)
+	}
+
 	immutableTree, err := t.tree.GetImmutable(int64(version))
 	if err != nil {
 		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
@@ -93,6 +103,16 @@ func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof,

 // Get implements the Reader interface.
 func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {
+	// the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed
+	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.Get(key)
+	}
+
 	immutableTree, err := t.tree.GetImmutable(int64(version))
 	if err != nil {
 		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
@@ -103,6 +123,16 @@ func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) {

 // Iterator implements the Reader interface.
 func (t *IavlTree) Iterator(version uint64, start, end []byte, ascending bool) (corestore.Iterator, error) {
+	// the mutable tree is empty at genesis & when the storekey is removed, but the immutable tree is not empty when the storekey is removed
+	// by checking the latest version we can determine if we are in genesis or have a key that has been removed
+	lv, err := t.tree.GetLatestVersion()
+	if err != nil {
+		return nil, err
+	}
+	if lv == 0 {
+		return t.tree.Iterator(start, end, ascending)
+	}
+
 	immutableTree, err := t.tree.GetImmutable(int64(version))
 	if err != nil {
 		return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err)
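The three methods above share one guard: a versioned read only makes sense once the tree has committed a version, so an empty tree (genesis, or a removed store key) is served from the mutable tree instead. A self-contained sketch of that shape; all names here are illustrative, not the SDK's API:

```go
package example

// versionedRead sketches the fallback pattern shared by GetProof, Get, and
// Iterator above: if no version has ever been committed, read the mutable
// tree; otherwise read the immutable tree at the requested version.
func versionedRead[T any](
	latestVersion func() (uint64, error), // e.g. the tree's GetLatestVersion
	fromMutable func() (T, error),        // read against the mutable tree
	fromImmutable func() (T, error),      // read against GetImmutable(version)
) (T, error) {
	var zero T
	lv, err := latestVersion()
	if err != nil {
		return zero, err
	}
	if lv == 0 {
		// genesis or removed store key: no immutable version exists yet
		return fromMutable()
	}
	return fromImmutable()
}
```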
@@ -233,6 +233,7 @@ func (c *CommitStore) SetInitialVersion(version uint64) error {
 	return nil
 }

+// GetProof returns a proof for the given key and version.
 func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) {
 	rawStoreKey := conv.UnsafeBytesToStr(storeKey)
 	tree, ok := c.multiTrees[rawStoreKey]
@@ -268,8 +269,12 @@ func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]p
 // WARNING: This function is only used during the migration process. The SC layer
 // generally does not provide a reader for the CommitStore.
 func (c *CommitStore) getReader(storeKey string) (Reader, error) {
-	tree, ok := c.multiTrees[storeKey]
-	if !ok {
+	var tree Tree
+	if storeTree, ok := c.oldTrees[storeKey]; ok {
+		tree = storeTree
+	} else if storeTree, ok := c.multiTrees[storeKey]; ok {
+		tree = storeTree
+	} else {
 		return nil, fmt.Errorf("store %s not found", storeKey)
 	}

@@ -283,6 +288,14 @@ func (c *CommitStore) getReader(storeKey string) (Reader, error) {

 // VersionExists implements store.VersionedReader.
 func (c *CommitStore) VersionExists(version uint64) (bool, error) {
+	latestVersion, err := c.metadata.GetLatestVersion()
+	if err != nil {
+		return false, err
+	}
+	if latestVersion == 0 {
+		return version == 0, nil
+	}
+
 	ci, err := c.metadata.GetCommitInfo(version)
 	return ci != nil, err
 }
@@ -435,12 +448,10 @@ func (c *CommitStore) Restore(
 	version uint64,
 	format uint32,
 	protoReader protoio.Reader,
-	chStorage chan<- *corestore.StateChanges,
 ) (snapshotstypes.SnapshotItem, error) {
 	var (
 		importer     Importer
 		snapshotItem snapshotstypes.SnapshotItem
-		storeKey     []byte
 	)

 loop:
@@ -463,8 +474,6 @@ loop:
 				return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err)
 			}
 		}

-			storeKey = []byte(item.Store.Name)
 			tree := c.multiTrees[item.Store.Name]
 			if tree == nil {
 				return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name)
@@ -493,17 +502,6 @@ loop:
 			if node.Value == nil {
 				node.Value = []byte{}
 			}
-
-			// If the node is a leaf node, it will be written to the storage.
-			chStorage <- &corestore.StateChanges{
-				Actor: storeKey,
-				StateChanges: []corestore.KVPair{
-					{
-						Key:   node.Key,
-						Value: node.Value,
-					},
-				},
-			}
 		}
 		err := importer.Add(node)
 		if err != nil {
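With the separate storage layer removed, `Restore` no longer fans restored leaf nodes out to a state-changes channel; the snapshot stream feeds the commitment trees alone. A minimal sketch of the new call shape, matching the updated test later in this diff (the import paths are assumptions):

```go
package example

import (
	protoio "github.com/cosmos/gogoproto/io"

	"cosmossdk.io/store/v2/commitment"
	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
)

// restoreSnapshot sketches the channel-free restore path: before this change
// Restore also took a chan<- *corestore.StateChanges that mirrored every leaf
// node into the storage backend; that parameter is gone.
func restoreSnapshot(cs *commitment.CommitStore, height uint64, r protoio.Reader) (snapshotstypes.SnapshotItem, error) {
	return cs.Restore(height, snapshotstypes.CurrentFormat, r)
}
```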
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"sync"

 	"github.com/stretchr/testify/suite"

@@ -32,6 +31,40 @@ type CommitStoreTestSuite struct {
 	TreeType string
 }

+// TestStore_Snapshotter tests the snapshot functionality of the CommitStore.
+// This test verifies that the store can correctly create snapshots and restore from them.
+// The test follows these steps:
+//
+// 1. Setup & Data Population:
+//   - Creates a new CommitStore with two stores (store1 and store2)
+//   - Writes 10 versions of data (version 1-10)
+//   - For each version, writes 10 key-value pairs to each store
+//   - Total data: 2 stores * 10 versions * 10 pairs = 200 key-value pairs
+//   - Keys are formatted as "key-{version}-{index}"
+//   - Values are formatted as "value-{version}-{index}"
+//   - Each version is committed to get a CommitInfo
+//
+// 2. Snapshot Creation:
+//   - Creates a dummy extension item for metadata testing
+//   - Sets up a new target store for restoration
+//   - Creates a channel for snapshot chunks
+//   - Launches a goroutine to:
+//   - Create a snapshot writer
+//   - Take a snapshot at version 10
+//   - Write extension metadata
+//
+// 3. Snapshot Restoration:
+//   - Creates a snapshot reader from the chunks
+//   - Sets up a channel for state changes during restoration
+//   - Launches a goroutine to collect restored key-value pairs
+//   - Restores the snapshot into the target store
+//   - Verifies the extension metadata was preserved
+//
+// 4. Verification:
+//   - Confirms all 200 key-value pairs were restored correctly
+//   - Verifies the format: "{storeKey}_key-{version}-{index}" -> "value-{version}-{index}"
+//   - Checks that the restored store's Merkle tree hashes match the original
+//   - Ensures store integrity by comparing CommitInfo hashes
 func (s *CommitStoreTestSuite) TestStore_Snapshotter() {
 	if s.TreeType == "iavlv2" {
 		s.T().Skip("FIXME: iavlv2 does not yet support snapshots")
@@ -40,21 +73,26 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() {
 	commitStore, err := s.NewStore(dbm.NewMemDB(), s.T().TempDir(), storeKeys, nil, coretesting.NewNopLogger())
 	s.Require().NoError(err)

+	// We'll create 10 versions of data
 	latestVersion := uint64(10)
 	kvCount := 10
 	var cInfo *proof.CommitInfo

+	// For each version 1-10
 	for i := uint64(1); i <= latestVersion; i++ {
+		// Create KV pairs for each store
 		kvPairs := make(map[string]corestore.KVPairs)
 		for _, storeKey := range storeKeys {
 			kvPairs[storeKey] = corestore.KVPairs{}
+			// Create 10 KV pairs for this store
 			for j := 0; j < kvCount; j++ {
 				key := []byte(fmt.Sprintf("key-%d-%d", i, j))
 				value := []byte(fmt.Sprintf("value-%d-%d", i, j))
 				kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value})
 			}
 		}
+		// Write and commit the changes for this version
 		s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(i, kvPairs)))

 		cInfo, err = commitStore.Commit(i)
 		s.Require().NoError(err)
 	}
@@ -88,34 +126,11 @@ func (s *CommitStoreTestSuite) TestStore_Snapshotter() {

 	streamReader, err := snapshots.NewStreamReader(chunks)
 	s.Require().NoError(err)
-	chStorage := make(chan *corestore.StateChanges, 100)
-	leaves := make(map[string]string)
-	wg := sync.WaitGroup{}
-	wg.Add(1)
-	go func() {
-		for kv := range chStorage {
-			for _, actor := range kv.StateChanges {
-				leaves[fmt.Sprintf("%s_%s", kv.Actor, actor.Key)] = string(actor.Value)
-			}
-		}
-		wg.Done()
-	}()
-	nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader, chStorage)

+	nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader)
 	s.Require().NoError(err)
 	s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension())

-	close(chStorage)
-	wg.Wait()
-	s.Require().Equal(len(storeKeys)*kvCount*int(latestVersion), len(leaves))
-	for _, storeKey := range storeKeys {
-		for i := 1; i <= int(latestVersion); i++ {
-			for j := 0; j < kvCount; j++ {
-				key := fmt.Sprintf("%s_key-%d-%d", storeKey, i, j)
-				s.Require().Equal(leaves[key], fmt.Sprintf("value-%d-%d", i, j))
-			}
-		}
-	}
-
 	// check the restored tree hash
 	targetCommitInfo, err := targetStore.GetCommitInfo(latestVersion)
 	s.Require().NoError(err)

@@ -7,19 +7,6 @@ import (
 	"cosmossdk.io/store/v2/proof"
 )

-// VersionedWriter defines an API for a versioned database that allows reads,
-// writes, iteration and commitment over a series of versions.
-type VersionedWriter interface {
-	VersionedReader
-
-	SetLatestVersion(version uint64) error
-	ApplyChangeset(cs *corestore.Changeset) error
-
-	// Closer releases associated resources. It should NOT be idempotent. It must
-	// only be called once and any call after may panic.
-	io.Closer
-}
-
 type VersionedReader interface {
 	Has(storeKey []byte, version uint64, key []byte) (bool, error)
 	Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
@@ -41,6 +28,8 @@ type UpgradableDatabase interface {

 // Committer defines an API for committing state.
 type Committer interface {
 	UpgradeableStore
+	VersionedReader
 	// WriteChangeset writes the changeset to the commitment state.
 	WriteChangeset(cs *corestore.Changeset) error
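Embedding `VersionedReader` in `Committer` is the heart of this refactor: the commitment store now also serves the versioned reads that the deleted `VersionedWriter` storage layer used to handle. A minimal sketch of what that buys a caller (function name is illustrative; `Has`/`Get` signatures come from the `VersionedReader` interface above):

```go
package example

import storev2 "cosmossdk.io/store/v2"

// readThroughCommitter shows the post-refactor read path: the same store
// that commits changesets answers historical reads, with no separate
// state-storage backend to keep in sync.
func readThroughCommitter(sc storev2.Committer, storeKey []byte, version uint64, key []byte) ([]byte, error) {
	ok, err := sc.Has(storeKey, version, key) // VersionedReader, embedded in Committer
	if err != nil || !ok {
		return nil, err
	}
	return sc.Get(storeKey, version, key)
}
```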
@@ -108,4 +108,4 @@ This limitation should be clearly understood before starting the migration proce
 especially if the node relies on historical data for any operations.

 If historical queries are required, users must fully migrate all historical data to `store/v2`.
 Alternatively, keeping store/v1 accessible for historical queries could be an option.
@@ -4,8 +4,7 @@ import (
 	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
-	"sync"
+	"sync/atomic"
 	"time"

 	"golang.org/x/sync/errgroup"
@@ -15,15 +14,11 @@ import (
 	"cosmossdk.io/store/v2/commitment"
 	"cosmossdk.io/store/v2/internal/encoding"
 	"cosmossdk.io/store/v2/snapshots"
 	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
-	"cosmossdk.io/store/v2/storage"
 )

 const (
 	// defaultChannelBufferSize is the default buffer size for the migration stream.
 	defaultChannelBufferSize = 1024
-	// defaultStorageBufferSize is the default buffer size for the storage snapshotter.
-	defaultStorageBufferSize = 1024

 	migrateChangesetKeyFmt = "m/cs_%x" // m/cs_<version>
 )
@@ -39,12 +34,11 @@ type Manager struct {
 	logger           log.Logger
 	snapshotsManager *snapshots.Manager

-	stateStorage    *storage.StorageStore
 	stateCommitment *commitment.CommitStore

-	db              corestore.KVStoreWithBatch
-	mtx             sync.Mutex // mutex for migratedVersion
-	migratedVersion uint64
+	db corestore.KVStoreWithBatch
+
+	migratedVersion atomic.Uint64

 	chChangeset <-chan *VersionedChangeset
 	chDone      <-chan struct{}
@@ -53,11 +47,10 @@ type Manager struct {
 // NewManager returns a new Manager.
 //
 // NOTE: `sc` can be `nil` if you don't want to migrate the commitment.
-func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, ss *storage.StorageStore, sc *commitment.CommitStore, logger log.Logger) *Manager {
+func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, sc *commitment.CommitStore, logger log.Logger) *Manager {
 	return &Manager{
 		logger:           logger,
 		snapshotsManager: sm,
-		stateStorage:     ss,
 		stateCommitment:  sc,
 		db:               db,
 	}
@@ -96,63 +89,14 @@ func (m *Manager) Migrate(height uint64) error {
 	// create the migration stream and snapshot,
 	// which acts as protoio.Reader and snapshots.WriteCloser.
 	ms := NewMigrationStream(defaultChannelBufferSize)

 	if err := m.snapshotsManager.CreateMigration(height, ms); err != nil {
 		return err
 	}

-	// restore the snapshot
-	chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize)
-
 	eg := new(errgroup.Group)
 	eg.Go(func() error {
-		return m.stateStorage.Restore(height, chStorage)
-	})
-	eg.Go(func() error {
-		defer close(chStorage)
-		if m.stateCommitment != nil {
-			if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil {
-				return err
-			}
-		} else { // there is no commitment migration, just consume the stream to restore the state storage
-			var storeKey []byte
-		loop:
-			for {
-				snapshotItem := snapshotstypes.SnapshotItem{}
-				err := ms.ReadMsg(&snapshotItem)
-				if errors.Is(err, io.EOF) {
-					break
-				}
-				if err != nil {
-					return fmt.Errorf("failed to read snapshot item: %w", err)
-				}
-				switch item := snapshotItem.Item.(type) {
-				case *snapshotstypes.SnapshotItem_Store:
-					storeKey = []byte(item.Store.Name)
-				case *snapshotstypes.SnapshotItem_IAVL:
-					if item.IAVL.Height == 0 { // only restore the leaf nodes
-						key := item.IAVL.Key
-						if key == nil {
-							key = []byte{}
-						}
-						value := item.IAVL.Value
-						if value == nil {
-							value = []byte{}
-						}
-						chStorage <- &corestore.StateChanges{
-							Actor: storeKey,
-							StateChanges: []corestore.KVPair{
-								{
-									Key:   key,
-									Value: value,
-								},
-							},
-						}
-					}
-				default:
-					break loop
-				}
-			}
-		}
+		if _, err := m.stateCommitment.Restore(height, 0, ms); err != nil {
+			return err
+		}
 		return nil
 	})
@@ -161,9 +105,7 @@ func (m *Manager) Migrate(height uint64) error {
 		return err
 	}

-	m.mtx.Lock()
-	m.migratedVersion = height
-	m.mtx.Unlock()
+	m.migratedVersion.Store(height)

 	return nil
 }
@@ -207,9 +149,7 @@ func (m *Manager) writeChangeset() error {
 // GetMigratedVersion returns the migrated version.
 // It is used to check the migrated version in the RootStore.
 func (m *Manager) GetMigratedVersion() uint64 {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	return m.migratedVersion
+	return m.migratedVersion.Load()
 }

 // Sync catches up the Changesets which are committed while the migration is in progress.
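The mutex-guarded `uint64` is replaced by `sync/atomic`'s `atomic.Uint64`, which makes the single-word version counter lock-free for both the migration goroutine and its readers. A self-contained sketch of the same pattern (names are illustrative):

```go
package example

import "sync/atomic"

// tracker shows the pattern the migration Manager adopts above: one version
// counter shared between the migrating goroutine and concurrent readers.
type tracker struct {
	migratedVersion atomic.Uint64 // zero value is ready to use; no mutex needed
}

// set is called by the writer once a height has been fully migrated.
func (t *tracker) set(height uint64) { t.migratedVersion.Store(height) }

// get can be called concurrently from any goroutine.
func (t *tracker) get() uint64 { return t.migratedVersion.Load() }
```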
@@ -251,13 +191,8 @@ func (m *Manager) Sync() error {
 				return fmt.Errorf("failed to commit changeset to commitment: %w", err)
 			}
 		}
-		if err := m.stateStorage.ApplyChangeset(cs); err != nil {
-			return fmt.Errorf("failed to write changeset to storage: %w", err)
-		}

-		m.mtx.Lock()
-		m.migratedVersion = version
-		m.mtx.Unlock()
+		m.migratedVersion.Store(version)

 		version += 1
 	}

@@ -13,13 +13,11 @@ import (
 	"cosmossdk.io/store/v2/commitment/iavl"
 	dbm "cosmossdk.io/store/v2/db"
 	"cosmossdk.io/store/v2/snapshots"
-	"cosmossdk.io/store/v2/storage"
-	"cosmossdk.io/store/v2/storage/pebbledb"
 )

 var storeKeys = []string{"store1", "store2"}

-func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitment.CommitStore) {
+func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) {
 	t.Helper()

 	db := dbm.NewMemDB()
@@ -28,18 +26,13 @@ func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) {
 		prefixDB := dbm.NewPrefixDB(db, []byte(storeKey))
 		multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig())
 	}

 	commitStore, err := commitment.NewCommitStore(multiTrees, nil, db, coretesting.NewNopLogger())
 	require.NoError(t, err)

 	snapshotsStore, err := snapshots.NewStore(t.TempDir())
 	require.NoError(t, err)

-	snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, nil, coretesting.NewNopLogger())
-
-	storageDB, err := pebbledb.New(t.TempDir())
-	require.NoError(t, err)
-	newStorageStore := storage.NewStorageStore(storageDB, coretesting.NewNopLogger()) // for store/v2
+	snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, coretesting.NewNopLogger())

 	db1 := dbm.NewMemDB()
 	multiTrees1 := make(map[string]commitment.Tree)
@@ -50,171 +43,137 @@ func setupMigrationManager(t *testing.T) (*Manager, *commitment.CommitStore) {

 	newCommitStore, err := commitment.NewCommitStore(multiTrees1, nil, db1, coretesting.NewNopLogger()) // for store/v2
 	require.NoError(t, err)
-	if noCommitStore {
-		newCommitStore = nil
-	}

-	return NewManager(db, snapshotsManager, newStorageStore, newCommitStore, coretesting.NewNopLogger()), commitStore
+	return NewManager(db, snapshotsManager, newCommitStore, coretesting.NewNopLogger()), commitStore
 }

 func TestMigrateState(t *testing.T) {
-	for _, noCommitStore := range []bool{false, true} {
-		t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) {
-			m, orgCommitStore := setupMigrationManager(t, noCommitStore)
-
-			// apply changeset
-			toVersion := uint64(100)
-			keyCount := 10
-			for version := uint64(1); version <= toVersion; version++ {
-				cs := corestore.NewChangeset(version)
-				for _, storeKey := range storeKeys {
-					for i := 0; i < keyCount; i++ {
-						cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
-					}
-				}
-				require.NoError(t, orgCommitStore.WriteChangeset(cs))
-				_, err := orgCommitStore.Commit(version)
-				require.NoError(t, err)
+	m, orgCommitStore := setupMigrationManager(t)
+	// apply changeset
+	toVersion := uint64(100)
+	keyCount := 10
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
 			}
+		}
+		require.NoError(t, orgCommitStore.WriteChangeset(cs))
+		_, err := orgCommitStore.Commit(version)
+		require.NoError(t, err)
+	}

-			err := m.Migrate(toVersion - 1)
-			require.NoError(t, err)
+	err := m.Migrate(toVersion - 1)
+	require.NoError(t, err)

-			// expecting error for conflicting process, since Migrate trigger snapshotter create migration,
-			// which start a snapshot process already.
-			_, err = m.snapshotsManager.Create(toVersion - 1)
-			require.Error(t, err)
+	// expecting error for conflicting process, since Migrate trigger snapshotter create migration,
+	// which start a snapshot process already.
+	_, err = m.snapshotsManager.Create(toVersion - 1)
+	require.Error(t, err)

-			if m.stateCommitment != nil {
-				// check the migrated state
-				for version := uint64(1); version < toVersion; version++ {
-					for _, storeKey := range storeKeys {
-						for i := 0; i < keyCount; i++ {
-							val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
-							require.NoError(t, err)
-							require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
-						}
-					}
-				}
-				// check the latest state
-				val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
-				require.NoError(t, err)
-				require.Nil(t, val)
-				val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
-				require.NoError(t, err)
-				require.Nil(t, val)
-			}
-
-			// check the storage
-			for version := uint64(1); version < toVersion; version++ {
-				for _, storeKey := range storeKeys {
-					for i := 0; i < keyCount; i++ {
-						val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
-						require.NoError(t, err)
-						require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
-					}
-				}
-			}
-		})
+	// check the migrated state
+	for version := uint64(1); version < toVersion; version++ {
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+				require.NoError(t, err)
+				require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
+			}
+		}
 	}
+
+	// check the latest state
+	val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
+	require.NoError(t, err)
+	require.Nil(t, val)
+	val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
+	require.NoError(t, err)
+	require.Nil(t, val)
 }

 func TestStartMigrateState(t *testing.T) {
-	for _, noCommitStore := range []bool{false, true} {
-		t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) {
-			m, orgCommitStore := setupMigrationManager(t, noCommitStore)
+	m, orgCommitStore := setupMigrationManager(t)

-			chDone := make(chan struct{})
-			chChangeset := make(chan *VersionedChangeset, 1)
+	chDone := make(chan struct{})
+	chChangeset := make(chan *VersionedChangeset, 1)

-			// apply changeset
-			toVersion := uint64(10)
-			keyCount := 5
-			changesets := []corestore.Changeset{}
+	// apply changeset
+	toVersion := uint64(10)
+	keyCount := 5
+	changesets := []corestore.Changeset{}

-			for version := uint64(1); version <= toVersion; version++ {
-				cs := corestore.NewChangeset(version)
-				for _, storeKey := range storeKeys {
-					for i := 0; i < keyCount; i++ {
-						cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
-					}
-				}
-				changesets = append(changesets, *cs)
-				require.NoError(t, orgCommitStore.WriteChangeset(cs))
-				_, err := orgCommitStore.Commit(version)
-				require.NoError(t, err)
+	for version := uint64(1); version <= toVersion; version++ {
+		cs := corestore.NewChangeset(version)
+		for _, storeKey := range storeKeys {
+			for i := 0; i < keyCount; i++ {
+				cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false)
 			}
+		}
+		changesets = append(changesets, *cs)
+		require.NoError(t, orgCommitStore.WriteChangeset(cs))
+		_, err := orgCommitStore.Commit(version)
+		require.NoError(t, err)
+	}

-			// feed changesets to channel
-			go func() {
-				for version := uint64(1); version <= toVersion; version++ {
-					chChangeset <- &VersionedChangeset{
-						Version:   version,
-						Changeset: &changesets[version-1],
-					}
-				}
-			}()
-
-			// check if migrate process complete
-			go func() {
-				for {
-					migrateVersion := m.GetMigratedVersion()
-					if migrateVersion == toVersion-1 {
-						break
-					}
-				}
-
-				chDone <- struct{}{}
-			}()
-
-			err := m.Start(toVersion-1, chChangeset, chDone)
-			require.NoError(t, err)
-
-			// expecting error for conflicting process, since Migrate trigger snapshotter create migration,
-			// which start a snapshot process already.
-			_, err = m.snapshotsManager.Create(toVersion - 1)
-			require.Error(t, err)
-
-			if m.stateCommitment != nil {
-				// check the migrated state
-				for version := uint64(1); version < toVersion; version++ {
-					for _, storeKey := range storeKeys {
-						for i := 0; i < keyCount; i++ {
-							val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
-							require.NoError(t, err)
-							require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
-						}
-					}
-				}
-				// check the latest state
-				val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
-				require.NoError(t, err)
-				require.Nil(t, val)
-				val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
-				require.NoError(t, err)
-				require.Nil(t, val)
-			}
+	// feed changesets to channel
+	go func() {
+		for version := uint64(1); version <= toVersion; version++ {
+			chChangeset <- &VersionedChangeset{
+				Version:   version,
+				Changeset: &changesets[version-1],
+			}
+		}
+	}()

-			// check the storage
-			for version := uint64(1); version < toVersion; version++ {
-				for _, storeKey := range storeKeys {
-					for i := 0; i < keyCount; i++ {
-						val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
-						require.NoError(t, err)
-						require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
-					}
-				}
-			}
+	// check if migrate process complete
+	go func() {
+		for {
+			migrateVersion := m.GetMigratedVersion()
+			if migrateVersion == toVersion-1 {
+				break
+			}
+		}

-			// check if migration db write change set to storage
-			for version := uint64(1); version < toVersion; version++ {
-				buf := make([]byte, 8)
-				binary.BigEndian.PutUint64(buf, version)
-				csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf))
-				csVal, err := m.db.Get(csKey)
-				require.NoError(t, err)
-				require.NotEmpty(t, csVal)
-			}
-		})
+		chDone <- struct{}{}
+	}()
+
+	err := m.Start(toVersion-1, chChangeset, chDone)
+	require.NoError(t, err)
+
+	// expecting error for conflicting process, since Migrate trigger snapshotter create migration,
+	// which start a snapshot process already.
+	_, err = m.snapshotsManager.Create(toVersion - 1)
+	require.Error(t, err)
+
+	if m.stateCommitment != nil {
+		// check the migrated state
+		for version := uint64(1); version < toVersion; version++ {
+			for _, storeKey := range storeKeys {
+				for i := 0; i < keyCount; i++ {
+					val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i)))
+					require.NoError(t, err)
+					require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val)
+				}
+			}
+		}
+		// check the latest state
+		val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+		val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0"))
+		require.NoError(t, err)
+		require.Nil(t, val)
+	}
+
+	// check if migration db write change set to storage
+	for version := uint64(1); version < toVersion; version++ {
+		buf := make([]byte, 8)
+		binary.BigEndian.PutUint64(buf, version)
+		csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf))
+		csVal, err := m.db.Get(csKey)
+		require.NoError(t, err)
+		require.NotEmpty(t, csVal)
+	}
 }
@@ -130,6 +130,36 @@ func (mr *MockStateCommitterMockRecorder) GetProof(storeKey, version, key any) *
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProof", reflect.TypeOf((*MockStateCommitter)(nil).GetProof), storeKey, version, key)
 }

+// Has mocks base method.
+func (m *MockStateCommitter) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Has", storeKey, version, key)
+	ret0, _ := ret[0].(bool)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Has indicates an expected call of Has.
+func (mr *MockStateCommitterMockRecorder) Has(storeKey, version, key any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateCommitter)(nil).Has), storeKey, version, key)
+}
+
+// Iterator mocks base method.
+func (m *MockStateCommitter) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end)
+	ret0, _ := ret[0].(store.Iterator)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// Iterator indicates an expected call of Iterator.
+func (mr *MockStateCommitterMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateCommitter)(nil).Iterator), storeKey, version, start, end)
+}
+
 // LoadVersion mocks base method.
 func (m *MockStateCommitter) LoadVersion(targetVersion uint64) error {
 	m.ctrl.T.Helper()
@@ -198,6 +228,35 @@ func (mr *MockStateCommitterMockRecorder) Prune(version any) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateCommitter)(nil).Prune), version)
 }

+// PruneStoreKeys mocks base method.
+func (m *MockStateCommitter) PruneStoreKeys(storeKeys []string, version uint64) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// PruneStoreKeys indicates an expected call of PruneStoreKeys.
+func (mr *MockStateCommitterMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateCommitter)(nil).PruneStoreKeys), storeKeys, version)
+}
+
+// ReverseIterator mocks base method.
+func (m *MockStateCommitter) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end)
+	ret0, _ := ret[0].(store.Iterator)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ReverseIterator indicates an expected call of ReverseIterator.
+func (mr *MockStateCommitterMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateCommitter)(nil).ReverseIterator), storeKey, version, start, end)
+}
+
 // SetInitialVersion mocks base method.
 func (m *MockStateCommitter) SetInitialVersion(version uint64) error {
 	m.ctrl.T.Helper()
@@ -212,6 +271,21 @@ func (mr *MockStateCommitterMockRecorder) SetInitialVersion(version any) *gomock
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialVersion", reflect.TypeOf((*MockStateCommitter)(nil).SetInitialVersion), version)
 }

+// VersionExists mocks base method.
+func (m *MockStateCommitter) VersionExists(v uint64) (bool, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "VersionExists", v)
+	ret0, _ := ret[0].(bool)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// VersionExists indicates an expected call of VersionExists.
+func (mr *MockStateCommitterMockRecorder) VersionExists(v any) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateCommitter)(nil).VersionExists), v)
+}
+
 // WriteChangeset mocks base method.
 func (m *MockStateCommitter) WriteChangeset(cs *store.Changeset) error {
 	m.ctrl.T.Helper()
@@ -225,199 +299,3 @@ func (mr *MockStateCommitterMockRecorder) WriteChangeset(cs any) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteChangeset", reflect.TypeOf((*MockStateCommitter)(nil).WriteChangeset), cs)
 }
-
-// MockStateStorage is a mock of StateStorage interface.
-type MockStateStorage struct {
-	ctrl     *gomock.Controller
-	recorder *MockStateStorageMockRecorder
-	isgomock struct{}
-}
-
-// MockStateStorageMockRecorder is the mock recorder for MockStateStorage.
-type MockStateStorageMockRecorder struct {
-	mock *MockStateStorage
-}
-
-// NewMockStateStorage creates a new mock instance.
-func NewMockStateStorage(ctrl *gomock.Controller) *MockStateStorage {
-	mock := &MockStateStorage{ctrl: ctrl}
-	mock.recorder = &MockStateStorageMockRecorder{mock}
-	return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockStateStorage) EXPECT() *MockStateStorageMockRecorder {
-	return m.recorder
-}
-
-// ApplyChangeset mocks base method.
-func (m *MockStateStorage) ApplyChangeset(cs *store.Changeset) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ApplyChangeset", cs)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// ApplyChangeset indicates an expected call of ApplyChangeset.
-func (mr *MockStateStorageMockRecorder) ApplyChangeset(cs any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyChangeset", reflect.TypeOf((*MockStateStorage)(nil).ApplyChangeset), cs)
-}
-
-// Close mocks base method.
-func (m *MockStateStorage) Close() error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Close")
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// Close indicates an expected call of Close.
-func (mr *MockStateStorageMockRecorder) Close() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockStateStorage)(nil).Close))
-}
-
-// Get mocks base method.
-func (m *MockStateStorage) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Get", storeKey, version, key)
-	ret0, _ := ret[0].([]byte)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// Get indicates an expected call of Get.
-func (mr *MockStateStorageMockRecorder) Get(storeKey, version, key any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStateStorage)(nil).Get), storeKey, version, key)
-}
-
-// GetLatestVersion mocks base method.
-func (m *MockStateStorage) GetLatestVersion() (uint64, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetLatestVersion")
-	ret0, _ := ret[0].(uint64)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetLatestVersion indicates an expected call of GetLatestVersion.
-func (mr *MockStateStorageMockRecorder) GetLatestVersion() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).GetLatestVersion))
-}
-
-// Has mocks base method.
-func (m *MockStateStorage) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Has", storeKey, version, key)
-	ret0, _ := ret[0].(bool)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// Has indicates an expected call of Has.
-func (mr *MockStateStorageMockRecorder) Has(storeKey, version, key any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockStateStorage)(nil).Has), storeKey, version, key)
-}
-
-// Iterator mocks base method.
-func (m *MockStateStorage) Iterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Iterator", storeKey, version, start, end)
-	ret0, _ := ret[0].(store.Iterator)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// Iterator indicates an expected call of Iterator.
-func (mr *MockStateStorageMockRecorder) Iterator(storeKey, version, start, end any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockStateStorage)(nil).Iterator), storeKey, version, start, end)
-}
-
-// PausePruning mocks base method.
-func (m *MockStateStorage) PausePruning(pause bool) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "PausePruning", pause)
-}
-
-// PausePruning indicates an expected call of PausePruning.
-func (mr *MockStateStorageMockRecorder) PausePruning(pause any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PausePruning", reflect.TypeOf((*MockStateStorage)(nil).PausePruning), pause)
-}
-
-// Prune mocks base method.
-func (m *MockStateStorage) Prune(version uint64) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Prune", version)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// Prune indicates an expected call of Prune.
-func (mr *MockStateStorageMockRecorder) Prune(version any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockStateStorage)(nil).Prune), version)
-}
-
-// PruneStoreKeys mocks base method.
-func (m *MockStateStorage) PruneStoreKeys(storeKeys []string, version uint64) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "PruneStoreKeys", storeKeys, version)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// PruneStoreKeys indicates an expected call of PruneStoreKeys.
-func (mr *MockStateStorageMockRecorder) PruneStoreKeys(storeKeys, version any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneStoreKeys", reflect.TypeOf((*MockStateStorage)(nil).PruneStoreKeys), storeKeys, version)
-}
-
-// ReverseIterator mocks base method.
-func (m *MockStateStorage) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (store.Iterator, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "ReverseIterator", storeKey, version, start, end)
-	ret0, _ := ret[0].(store.Iterator)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// ReverseIterator indicates an expected call of ReverseIterator.
-func (mr *MockStateStorageMockRecorder) ReverseIterator(storeKey, version, start, end any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockStateStorage)(nil).ReverseIterator), storeKey, version, start, end)
-}
-
-// SetLatestVersion mocks base method.
-func (m *MockStateStorage) SetLatestVersion(version uint64) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "SetLatestVersion", version)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// SetLatestVersion indicates an expected call of SetLatestVersion.
-func (mr *MockStateStorageMockRecorder) SetLatestVersion(version any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestVersion", reflect.TypeOf((*MockStateStorage)(nil).SetLatestVersion), version)
-}
-
-// VersionExists mocks base method.
-func (m *MockStateStorage) VersionExists(v uint64) (bool, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "VersionExists", v)
-	ret0, _ := ret[0].(bool)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// VersionExists indicates an expected call of VersionExists.
-func (mr *MockStateStorageMockRecorder) VersionExists(v any) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VersionExists", reflect.TypeOf((*MockStateStorage)(nil).VersionExists), v)
-}

@@ -8,12 +8,6 @@ type StateCommitter interface {
 	store.Pruner
 	store.PausablePruner
 	store.UpgradeableStore
 }
-
-// StateStorage is a mock of store.VersionedWriter
-type StateStorage interface {
-	store.VersionedWriter
-	store.VersionedReader
-	store.UpgradableDatabase
-	store.Pruner
-	store.PausablePruner
-}
@@ -10,19 +10,13 @@ type Manager struct {
 	scPruner store.Pruner
 	// scPruningOption are the pruning options for the SC.
 	scPruningOption *store.PruningOption
-	// ssPruner is the pruner for the SS.
-	ssPruner store.Pruner
-	// ssPruningOption are the pruning options for the SS.
-	ssPruningOption *store.PruningOption
 }

 // NewManager creates a new Pruning Manager.
-func NewManager(scPruner, ssPruner store.Pruner, scPruningOption, ssPruningOption *store.PruningOption) *Manager {
+func NewManager(scPruner store.Pruner, scPruningOption *store.PruningOption) *Manager {
 	return &Manager{
 		scPruner:        scPruner,
 		scPruningOption: scPruningOption,
-		ssPruner:        ssPruner,
-		ssPruningOption: ssPruningOption,
 	}
 }
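The pruning manager now tracks a single pruner/option pair for the SC. A small sketch of the gating logic that `Manager.Prune` keeps, using `ShouldPrune` as it appears in the hunks below; treat the exact option semantics (`NewPruningOptionWithCustom(keepRecent, interval)` in the tests) as assumptions:

```go
package example

import store "cosmossdk.io/store/v2"

// pruneIfDue sketches the SC-only flow: ask the pruning option whether this
// version triggers pruning and, if so, prune the commitment store up to the
// height it returns.
func pruneIfDue(pruner store.Pruner, opt *store.PruningOption, version uint64) error {
	if opt == nil {
		return nil // pruning disabled
	}
	if prune, pruneTo := opt.ShouldPrune(version); prune {
		return pruner.Prune(pruneTo)
	}
	return nil
}
```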
@@ -39,15 +33,6 @@ func (m *Manager) Prune(version uint64) error {
 		}
 	}

-	// Prune the SS.
-	if m.ssPruningOption != nil {
-		if prune, pruneTo := m.ssPruningOption.ShouldPrune(version); prune {
-			if err := m.ssPruner.Prune(pruneTo); err != nil {
-				return err
-			}
-		}
-	}
-
 	return nil
 }

@@ -55,9 +40,6 @@ func (m *Manager) signalPruning(pause bool) {
 	if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok {
 		scPausablePruner.PausePruning(pause)
 	}
-	if ssPausablePruner, ok := m.ssPruner.(store.PausablePruner); ok {
-		ssPausablePruner.PausePruning(pause)
-	}
 }

 func (m *Manager) PausePruning() {

@@ -14,8 +14,6 @@ import (
 	"cosmossdk.io/store/v2/commitment"
 	"cosmossdk.io/store/v2/commitment/iavl"
 	dbm "cosmossdk.io/store/v2/db"
-	"cosmossdk.io/store/v2/storage"
-	"cosmossdk.io/store/v2/storage/pebbledb"
 )

 var storeKeys = []string{"store1", "store2", "store3"}
@@ -25,7 +23,6 @@ type PruningManagerTestSuite struct {

 	manager *Manager
 	sc      *commitment.CommitStore
-	ss      *storage.StorageStore
 }

 func TestPruningManagerTestSuite(t *testing.T) {
@@ -45,12 +42,8 @@ func (s *PruningManagerTestSuite) SetupTest() {
 	s.sc, err = commitment.NewCommitStore(multiTrees, nil, mdb, nopLog)
 	s.Require().NoError(err)

-	pebbleDB, err := pebbledb.New(s.T().TempDir())
-	s.Require().NoError(err)
-	s.ss = storage.NewStorageStore(pebbleDB, nopLog)
-	scPruningOption := store.NewPruningOptionWithCustom(0, 1)  // prune all
-	ssPruningOption := store.NewPruningOptionWithCustom(5, 10) // prune some
-	s.manager = NewManager(s.sc, s.ss, scPruningOption, ssPruningOption)
+	scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all
+	s.manager = NewManager(s.sc, scPruningOption)
 }

 func (s *PruningManagerTestSuite) TestPrune() {
@@ -68,8 +61,6 @@ func (s *PruningManagerTestSuite) TestPrune() {
 		_, err := s.sc.Commit(version)
 		s.Require().NoError(err)

-		s.Require().NoError(s.ss.ApplyChangeset(cs))
-
 		s.Require().NoError(s.manager.Prune(version))
 	}

@@ -86,24 +77,6 @@ func (s *PruningManagerTestSuite) TestPrune() {
 		return count == len(storeKeys)
 	}
 	s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second)
-
-	// check the storage store
-	_, pruneVersion := s.manager.ssPruningOption.ShouldPrune(toVersion)
-	for version := uint64(1); version <= toVersion; version++ {
-		for _, storeKey := range storeKeys {
-			for i := 0; i < keyCount; i++ {
-				key := []byte(fmt.Sprintf("key-%d-%d", version, i))
-				value, err := s.ss.Get([]byte(storeKey), version, key)
-				if version <= pruneVersion {
-					s.Require().Nil(value)
-					s.Require().Error(err)
-				} else {
-					s.Require().NoError(err)
-					s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), value)
-				}
-			}
-		}
-	}
 }

 func TestPruningOption(t *testing.T) {
@@ -164,8 +137,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() {
 	_, err := s.sc.Commit(1)
 	s.Require().NoError(err)

-	s.Require().NoError(s.ss.ApplyChangeset(cs))
-
 	// commit version 2
 	for _, storeKey := range storeKeys {
 		cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", 2, 0)), []byte(fmt.Sprintf("value-%d-%d", 2, 0)), false)
@@ -179,8 +150,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() {
 	_, err = s.sc.Commit(2)
 	s.Require().NoError(err)

-	s.Require().NoError(s.ss.ApplyChangeset(cs))
-
 	// try prune before signaling commit has finished
 	s.Require().NoError(s.manager.Prune(2))

@@ -238,7 +207,6 @@ func (s *PruningManagerTestSuite) TestSignalCommit() {
 		_, err := s.sc.Commit(version)
 		s.Require().NoError(err)

-		s.Require().NoError(s.ss.ApplyChangeset(cs))
 		err = s.manager.ResumePruning(version)
 		s.Require().NoError(err)
 	}
@ -3,7 +3,6 @@ package root
import (
"errors"
"fmt"
"os"

"cosmossdk.io/core/log"
corestore "cosmossdk.io/core/store"
@ -14,28 +13,20 @@ import (
"cosmossdk.io/store/v2/db"
"cosmossdk.io/store/v2/internal"
"cosmossdk.io/store/v2/pruning"
"cosmossdk.io/store/v2/storage"
"cosmossdk.io/store/v2/storage/pebbledb"
"cosmossdk.io/store/v2/storage/rocksdb"
)

type (
SSType string
SCType string
)

const (
SSTypePebble SSType = "pebble"
SSTypeRocks SSType = "rocksdb"
SCTypeIavl SCType = "iavl"
SCTypeIavlV2 SCType = "iavl-v2"
)

// Options are the options for creating a root store.
type Options struct {
SSType SSType `mapstructure:"ss-type" toml:"ss-type" comment:"State storage database type. Currently we support: \"pebble\" and \"rocksdb\""`
SCType SCType `mapstructure:"sc-type" toml:"sc-type" comment:"State commitment database type. Currently we support: \"iavl\" and \"iavl-v2\""`
SSPruningOption *store.PruningOption `mapstructure:"ss-pruning-option" toml:"ss-pruning-option" comment:"Pruning options for state storage"`
SCPruningOption *store.PruningOption `mapstructure:"sc-pruning-option" toml:"sc-pruning-option" comment:"Pruning options for state commitment"`
IavlConfig *iavl.Config `mapstructure:"iavl-config" toml:"iavl-config"`
}
@ -52,16 +43,11 @@ type FactoryOptions struct {
// DefaultStoreOptions returns the default options for creating a root store.
func DefaultStoreOptions() Options {
return Options{
SSType: SSTypePebble,
SCType: SCTypeIavl,
SCPruningOption: &store.PruningOption{
KeepRecent: 2,
Interval: 100,
},
SSPruningOption: &store.PruningOption{
KeepRecent: 2,
Interval: 100,
},
IavlConfig: &iavl.Config{
CacheSize: 100_000,
SkipFastStorageUpgrade: true,
@ -75,39 +61,11 @@ func DefaultStoreOptions() Options {
// necessary, but demonstrates the required steps and configuration to create a root store.
func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) {
var (
ssDb storage.Database
ss *storage.StorageStore
sc *commitment.CommitStore
err error
ensureDir = func(dir string) error {
if err := os.MkdirAll(dir, 0o0755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", dir, err)
}
return nil
}
sc *commitment.CommitStore
err error
)

storeOpts := opts.Options
switch storeOpts.SSType {
case SSTypePebble:
dir := fmt.Sprintf("%s/data/ss/pebble", opts.RootDir)
if err = ensureDir(dir); err != nil {
return nil, err
}
ssDb, err = pebbledb.New(dir)
case SSTypeRocks:
dir := fmt.Sprintf("%s/data/ss/rocksdb", opts.RootDir)
if err = ensureDir(dir); err != nil {
return nil, err
}
ssDb, err = rocksdb.New(dir)
default:
return nil, fmt.Errorf("unknown storage type: %s", opts.Options.SSType)
}
if err != nil {
return nil, err
}
ss = storage.NewStorageStore(ssDb, opts.Logger)

metadata := commitment.NewMetadataStore(opts.SCRawDB)
latestVersion, err := metadata.GetLatestVersion()
@ -168,6 +126,6 @@ func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) {
return nil, err
}

pm := pruning.NewManager(sc, ss, storeOpts.SCPruningOption, storeOpts.SSPruningOption)
return New(opts.SCRawDB, opts.Logger, ss, sc, pm, nil, nil)
pm := pruning.NewManager(sc, storeOpts.SCPruningOption)
return New(opts.SCRawDB, opts.Logger, sc, pm, nil, nil)
}

@ -17,8 +17,6 @@ import (
"cosmossdk.io/store/v2/migration"
"cosmossdk.io/store/v2/pruning"
"cosmossdk.io/store/v2/snapshots"
"cosmossdk.io/store/v2/storage"
"cosmossdk.io/store/v2/storage/pebbledb"
)

var storeKeys = []string{"store1", "store2", "store3"}
@ -61,11 +59,6 @@ func (s *MigrateStoreTestSuite) SetupTest() {
s.Require().NoError(err)
}

// create a new storage and commitment stores
pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss := storage.NewStorageStore(pebbleDB, testLog)

multiTrees1 := make(map[string]commitment.Tree)
for _, storeKey := range storeKeys {
multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig())
@ -75,12 +68,12 @@

snapshotsStore, err := snapshots.NewStore(s.T().TempDir())
s.Require().NoError(err)
snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, nil, testLog)
migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, ss, sc, testLog)
pm := pruning.NewManager(sc, ss, nil, nil)
snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, testLog)
migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, sc, testLog)
pm := pruning.NewManager(sc, nil)

// assume no storage store, simulate the migration process
s.rootStore, err = New(dbm.NewMemDB(), testLog, ss, orgSC, pm, migrationManager, nil)
s.rootStore, err = New(dbm.NewMemDB(), testLog, orgSC, pm, migrationManager, nil)
s.Require().NoError(err)
}

@ -115,7 +108,7 @@ func (s *MigrateStoreTestSuite) TestMigrateState() {
s.Require().NoError(err)

// check if the migration is completed
ver, err := s.rootStore.GetStateStorage().GetLatestVersion()
ver, err := s.rootStore.GetLatestVersion()
s.Require().NoError(err)
if ver == latestVersion {
break

@ -34,9 +34,6 @@ type Store struct {
// holds the db instance for closing it
dbCloser io.Closer

// stateStorage reflects the state storage backend
stateStorage store.VersionedWriter

// stateCommitment reflects the state commitment (SC) backend
stateCommitment store.Committer

@ -67,7 +64,6 @@ type Store struct {
func New(
dbCloser io.Closer,
logger corelog.Logger,
ss store.VersionedWriter,
sc store.Committer,
pm *pruning.Manager,
mm *migration.Manager,
@ -76,7 +72,6 @@ func New(
return &Store{
dbCloser: dbCloser,
logger: logger,
stateStorage: ss,
stateCommitment: sc,
pruningManager: pm,
migrationManager: mm,
@ -88,11 +83,9 @@ func New(
// Close closes the store and resets all internal fields. Note, Close() is NOT
// idempotent and should only be called once.
func (s *Store) Close() (err error) {
err = errors.Join(err, s.stateStorage.Close())
err = errors.Join(err, s.stateCommitment.Close())
err = errors.Join(err, s.dbCloser.Close())

s.stateStorage = nil
s.stateCommitment = nil
s.lastCommitInfo = nil

@ -113,24 +106,13 @@ func (s *Store) SetInitialVersion(v uint64) error {
// and the version exists in the state commitment, since the state storage will be
// synced during migration.
func (s *Store) getVersionedReader(version uint64) (store.VersionedReader, error) {
isExist, err := s.stateStorage.VersionExists(version)
isExist, err := s.stateCommitment.VersionExists(version)
if err != nil {
return nil, err
}
if isExist {
return s.stateStorage, nil
return s.stateCommitment, nil
}

if vReader, ok := s.stateCommitment.(store.VersionedReader); ok {
isExist, err := vReader.VersionExists(version)
if err != nil {
return nil, err
}
if isExist {
return vReader, nil
}
}

return nil, fmt.Errorf("version %d does not exist", version)
}

@ -139,7 +121,6 @@ func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) {
if err != nil {
return 0, nil, err
}

vReader, err := s.getVersionedReader(v)
if err != nil {
return 0, nil, err
@ -154,10 +135,6 @@ func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) {
return NewReaderMap(v, vReader), err
}

func (s *Store) GetStateStorage() store.VersionedWriter {
return s.stateStorage
}

func (s *Store) GetStateCommitment() store.Committer {
return s.stateCommitment
}
@ -198,29 +175,9 @@ func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) (
defer s.telemetry.MeasureSince(now, "root_store", "query")
}

var val []byte
var err error
if s.isMigrating { // if we're migrating, we need to query the SC backend
val, err = s.stateCommitment.Get(storeKey, version, key)
if err != nil {
return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err)
}
} else {
val, err = s.stateStorage.Get(storeKey, version, key)
if err != nil {
return store.QueryResult{}, fmt.Errorf("failed to query SS store: %w", err)
}
if val == nil {
// fallback to querying SC backend if not found in SS backend
//
// Note, this should only used during migration, i.e. while SS and IAVL v2
// are being asynchronously synced.
bz, scErr := s.stateCommitment.Get(storeKey, version, key)
if scErr != nil {
return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", scErr)
}
val = bz
}
val, err := s.stateCommitment.Get(storeKey, version, key)
if err != nil {
return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err)
}

result := store.QueryResult{
@ -291,15 +248,6 @@ func (s *Store) LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreU
return err
}

// if the state storage implements the UpgradableDatabase interface, prune the
// deleted store keys
upgradableDatabase, ok := s.stateStorage.(store.UpgradableDatabase)
if ok {
if err := upgradableDatabase.PruneStoreKeys(upgrades.Deleted, version); err != nil {
return fmt.Errorf("failed to prune store keys %v: %w", upgrades.Deleted, err)
}
}

return nil
}

@ -318,11 +266,7 @@ func (s *Store) loadVersion(v uint64, upgrades *corestore.StoreUpgrades, overrid
}
} else {
// if upgrades are provided, we need to load the version and apply the upgrades
upgradeableStore, ok := s.stateCommitment.(store.UpgradeableStore)
if !ok {
return errors.New("SC store does not support upgrades")
}
if err := upgradeableStore.LoadVersionAndUpgrade(v, upgrades); err != nil {
if err := s.stateCommitment.LoadVersionAndUpgrade(v, upgrades); err != nil {
return fmt.Errorf("failed to load SS version with upgrades %d: %w", v, err)
}
}
@ -363,18 +307,6 @@ func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) {

eg := new(errgroup.Group)

// if migrating the changeset will be sent to migration manager to fill SS
// otherwise commit to SS async here
if !s.isMigrating {
eg.Go(func() error {
if err := s.stateStorage.ApplyChangeset(cs); err != nil {
return fmt.Errorf("failed to commit SS: %w", err)
}

return nil
})
}

// commit SC async
var cInfo *proof.CommitInfo
eg.Go(func() error {

@ -15,13 +15,12 @@ import (
"cosmossdk.io/store/v2/pruning"
)

func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store {
func newTestRootStore(sc store.Committer) *Store {
noopLog := coretesting.NewNopLogger()
pm := pruning.NewManager(sc.(store.Pruner), ss.(store.Pruner), nil, nil)
pm := pruning.NewManager(sc.(store.Pruner), nil)
return &Store{
logger: noopLog,
telemetry: metrics.Metrics{},
stateStorage: ss,
stateCommitment: sc,
pruningManager: pm,
isMigrating: false,
@ -30,9 +29,8 @@ func newTestRootStore(ss store.VersionedWriter, sc store.Committer) *Store {

func TestGetLatestState(t *testing.T) {
ctrl := gomock.NewController(t)
ss := mock.NewMockStateStorage(ctrl)
sc := mock.NewMockStateCommitter(ctrl)
rs := newTestRootStore(ss, sc)
rs := newTestRootStore(sc)

// Get the latest version
sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error"))
@ -46,46 +44,36 @@ func TestGetLatestState(t *testing.T) {

func TestQuery(t *testing.T) {
ctrl := gomock.NewController(t)
ss := mock.NewMockStateStorage(ctrl)
sc := mock.NewMockStateCommitter(ctrl)
rs := newTestRootStore(ss, sc)
rs := newTestRootStore(sc)

// Query without Proof
ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error"))
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error"))
_, err := rs.Query(nil, 0, nil, false)
require.Error(t, err)
ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil)
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error"))
_, err = rs.Query(nil, 0, nil, false)
require.Error(t, err)
ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil)
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil)
v, err := rs.Query(nil, 0, nil, false)
require.NoError(t, err)
require.Equal(t, []byte("value"), v.Value)
ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil)
v, err = rs.Query(nil, 0, nil, false)
require.NoError(t, err)
require.Equal(t, []byte("value"), v.Value)

// Query with Proof
ss.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil)
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil)
sc.EXPECT().GetProof(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error"))
v, err = rs.Query(nil, 0, nil, true)
_, err = rs.Query(nil, 0, nil, true)
require.Error(t, err)

// Query with Migration

rs.isMigrating = true
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("error"))
sc.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return([]byte("value"), nil)
_, err = rs.Query(nil, 0, nil, false)
require.Error(t, err)
require.NoError(t, err)
}

func TestLoadVersion(t *testing.T) {
ctrl := gomock.NewController(t)
ss := mock.NewMockStateStorage(ctrl)
sc := mock.NewMockStateCommitter(ctrl)
rs := newTestRootStore(ss, sc)
rs := newTestRootStore(sc)

// LoadLatestVersion
sc.EXPECT().GetLatestVersion().Return(uint64(0), errors.New("error"))
@ -107,11 +95,6 @@ func TestLoadVersion(t *testing.T) {
sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(errors.New("error"))
err = rs.LoadVersionAndUpgrade(uint64(2), v)
require.Error(t, err)
sc.EXPECT().LoadVersionAndUpgrade(uint64(2), v).Return(nil)
sc.EXPECT().GetCommitInfo(uint64(2)).Return(nil, nil)
ss.EXPECT().PruneStoreKeys(gomock.Any(), uint64(2)).Return(errors.New("error"))
err = rs.LoadVersionAndUpgrade(uint64(2), v)
require.Error(t, err)

// LoadVersionUpgrade with Migration
rs.isMigrating = true

@ -16,8 +16,6 @@ import (
dbm "cosmossdk.io/store/v2/db"
"cosmossdk.io/store/v2/proof"
"cosmossdk.io/store/v2/pruning"
"cosmossdk.io/store/v2/storage"
"cosmossdk.io/store/v2/storage/pebbledb"
)

const (
@ -47,18 +45,14 @@ func TestStorageTestSuite(t *testing.T) {
func (s *RootStoreTestSuite) SetupTest() {
noopLog := coretesting.NewNopLogger()

pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss := storage.NewStorageStore(pebbleDB, noopLog)

tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig())
tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig())
tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig())
sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, nil, dbm.NewMemDB(), noopLog)
s.Require().NoError(err)

pm := pruning.NewManager(sc, ss, nil, nil)
rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil)
pm := pruning.NewManager(sc, nil)
rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil)
s.Require().NoError(err)

s.rootStore = rs
@ -67,10 +61,6 @@ func (s *RootStoreTestSuite) SetupTest() {
func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption) {
noopLog := coretesting.NewNopLogger()

pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss := storage.NewStorageStore(pebbleDB, noopLog)

mdb := dbm.NewMemDB()
multiTrees := make(map[string]commitment.Tree)
for _, storeKey := range testStoreKeys {
@ -81,18 +71,18 @@ func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption
sc, err := commitment.NewCommitStore(multiTrees, nil, dbm.NewMemDB(), noopLog)
s.Require().NoError(err)

pm := pruning.NewManager(sc, ss, config, config)
pm := pruning.NewManager(sc, config)

rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil)
rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil)
s.Require().NoError(err)

s.rootStore = rs
}

func (s *RootStoreTestSuite) newStoreWithBackendMount(ss store.VersionedWriter, sc store.Committer, pm *pruning.Manager) {
func (s *RootStoreTestSuite) newStoreWithBackendMount(sc store.Committer, pm *pruning.Manager) {
noopLog := coretesting.NewNopLogger()

rs, err := New(dbm.NewMemDB(), noopLog, ss, sc, pm, nil, nil)
rs, err := New(dbm.NewMemDB(), noopLog, sc, pm, nil, nil)
s.Require().NoError(err)

s.rootStore = rs
@ -107,10 +97,6 @@ func (s *RootStoreTestSuite) TestGetStateCommitment() {
s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment)
}

func (s *RootStoreTestSuite) TestGetStateStorage() {
s.Require().Equal(s.rootStore.GetStateStorage(), s.rootStore.(*Store).stateStorage)
}

func (s *RootStoreTestSuite) TestSetInitialVersion() {
initialVersion := uint64(5)
s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion))
@ -603,17 +589,14 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() {

mdb1 := dbm.NewMemDB()
mdb2 := dbm.NewMemDB()
pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss := storage.NewStorageStore(pebbleDB, noopLog)

tree := iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig())
sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog)
s.Require().NoError(err)

pm := pruning.NewManager(sc, ss, pruneOpt, pruneOpt)
pm := pruning.NewManager(sc, pruneOpt)

s.newStoreWithBackendMount(ss, sc, pm)
s.newStoreWithBackendMount(sc, pm)
s.Require().NoError(s.rootStore.LoadLatestVersion())

// Commit enough to build up heights to prune, where on the next block we should
@ -633,18 +616,13 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() {
s.Require().False(ok)
s.Require().Equal(uint64(0), actualHeightToPrune)

// "restart"
pebbleDB, err = pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss = storage.NewStorageStore(pebbleDB, noopLog)

tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig())
sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, nil, mdb2, noopLog)
s.Require().NoError(err)

pm = pruning.NewManager(sc, ss, pruneOpt, pruneOpt)
pm = pruning.NewManager(sc, pruneOpt)

s.newStoreWithBackendMount(ss, sc, pm)
s.newStoreWithBackendMount(sc, pm)
err = s.rootStore.LoadLatestVersion()
s.Require().NoError(err)

@ -684,11 +662,6 @@ func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() {
func (s *RootStoreTestSuite) TestMultiStoreRestart() {
noopLog := coretesting.NewNopLogger()

pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)

ss := storage.NewStorageStore(pebbleDB, noopLog)

mdb1 := dbm.NewMemDB()
mdb2 := dbm.NewMemDB()
multiTrees := make(map[string]commitment.Tree)
@ -700,9 +673,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() {
sc, err := commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog)
s.Require().NoError(err)

pm := pruning.NewManager(sc, ss, nil, nil)
pm := pruning.NewManager(sc, nil)

s.newStoreWithBackendMount(ss, sc, pm)
s.newStoreWithBackendMount(sc, pm)
s.Require().NoError(s.rootStore.LoadLatestVersion())

// perform changes
@ -787,9 +760,9 @@ func (s *RootStoreTestSuite) TestMultiStoreRestart() {
sc, err = commitment.NewCommitStore(multiTrees, nil, mdb2, noopLog)
s.Require().NoError(err)

pm = pruning.NewManager(sc, ss, nil, nil)
pm = pruning.NewManager(sc, nil)

s.newStoreWithBackendMount(ss, sc, pm)
s.newStoreWithBackendMount(sc, pm)
err = s.rootStore.LoadLatestVersion()
s.Require().Nil(err)

@ -14,8 +14,6 @@ import (
"cosmossdk.io/store/v2/commitment/iavl"
dbm "cosmossdk.io/store/v2/db"
"cosmossdk.io/store/v2/pruning"
"cosmossdk.io/store/v2/storage"
"cosmossdk.io/store/v2/storage/pebbledb"
)

type UpgradeStoreTestSuite struct {
@ -43,14 +41,10 @@ func (s *UpgradeStoreTestSuite) SetupTest() {
multiTrees[storeKey], _ = newTreeFn(storeKey)
}

// create storage and commitment stores
pebbleDB, err := pebbledb.New(s.T().TempDir())
s.Require().NoError(err)
ss := storage.NewStorageStore(pebbleDB, testLog)
sc, err := commitment.NewCommitStore(multiTrees, nil, s.commitDB, testLog)
s.Require().NoError(err)
pm := pruning.NewManager(sc, ss, nil, nil)
s.rootStore, err = New(s.commitDB, testLog, ss, sc, pm, nil, nil)
pm := pruning.NewManager(sc, nil)
s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil)
s.Require().NoError(err)

// commit changeset
@ -91,8 +85,8 @@ func (s *UpgradeStoreTestSuite) loadWithUpgrades(upgrades *corestore.StoreUpgrad

sc, err := commitment.NewCommitStore(multiTrees, oldTrees, s.commitDB, testLog)
s.Require().NoError(err)
pm := pruning.NewManager(sc, s.rootStore.GetStateStorage().(store.Pruner), nil, nil)
s.rootStore, err = New(s.commitDB, testLog, s.rootStore.GetStateStorage(), sc, pm, nil, nil)
pm := pruning.NewManager(sc, nil)
s.rootStore, err = New(s.commitDB, testLog, sc, pm, nil, nil)
s.Require().NoError(err)
}

@ -112,7 +106,7 @@ func (s *UpgradeStoreTestSuite) TestLoadVersionAndUpgrade() {

keyCount := 10
// check old store keys are queryable
oldStoreKeys := []string{"store1", "store3"}
oldStoreKeys := []string{"store1", "store2", "store3"}
for _, storeKey := range oldStoreKeys {
for version := uint64(1); version <= v; version++ {
for i := 0; i < keyCount; i++ {

@ -14,7 +14,6 @@ import (
protoio "github.com/cosmos/gogoproto/io"
"github.com/stretchr/testify/require"

corestore "cosmossdk.io/core/store"
coretesting "cosmossdk.io/core/testing"
"cosmossdk.io/store/v2/snapshots"
snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
@ -109,7 +108,7 @@ type mockCommitSnapshotter struct {
}

func (m *mockCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
height uint64, format uint32, protoReader protoio.Reader,
) (snapshotstypes.SnapshotItem, error) {
if format == 0 {
return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat
@ -120,7 +119,6 @@ func (m *mockCommitSnapshotter) Restore(

var item snapshotstypes.SnapshotItem
m.items = [][]byte{}
keyCount := 0
for {
item.Reset()
err := protoReader.ReadMsg(&item)
@ -134,19 +132,6 @@ func (m *mockCommitSnapshotter) Restore(
break
}
m.items = append(m.items, payload.Payload)
// mock feeding chStorage to check if the loop closed properly
//
// ref: https://github.com/cosmos/cosmos-sdk/pull/21106
chStorage <- &corestore.StateChanges{
Actor: []byte("actor"),
StateChanges: []corestore.KVPair{
{
Key: []byte(fmt.Sprintf("key-%d", keyCount)),
Value: payload.Payload,
},
},
}
keyCount++
}

return item, nil
@ -169,22 +154,6 @@ func (m *mockCommitSnapshotter) SupportedFormats() []uint32 {
return []uint32{snapshotstypes.CurrentFormat}
}

type mockStorageSnapshotter struct {
items map[string][]byte
}

func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error {
// mock consuming chStorage to check if the loop closed properly
//
// ref: https://github.com/cosmos/cosmos-sdk/pull/21106
for change := range chStorage {
for _, kv := range change.StateChanges {
m.items[string(kv.Key)] = kv.Value
}
}
return nil
}

type mockErrorCommitSnapshotter struct{}

var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil)
@ -194,7 +163,7 @@ func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio
}

func (m *mockErrorCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
height uint64, format uint32, protoReader protoio.Reader,
) (snapshotstypes.SnapshotItem, error) {
return snapshotstypes.SnapshotItem{}, errors.New("mock restore error")
}
@ -214,7 +183,7 @@ func setupBusyManager(t *testing.T) *snapshots.Manager {
store, err := snapshots.NewStore(t.TempDir())
require.NoError(t, err)
hung := newHungCommitSnapshotter()
mgr := snapshots.NewManager(store, opts, hung, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
mgr := snapshots.NewManager(store, opts, hung, nil, coretesting.NewNopLogger())

// Channel to ensure the test doesn't finish until the goroutine is done.
// Without this, there are intermittent test failures about
@ -258,7 +227,7 @@ func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writ
}

func (m *hungCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
height uint64, format uint32, protoReader protoio.Reader,
) (snapshotstypes.SnapshotItem, error) {
panic("not implemented")
}

@ -12,7 +12,6 @@ import (
"sync"

corelog "cosmossdk.io/core/log"
corestore "cosmossdk.io/core/store"
errorsmod "cosmossdk.io/errors/v2"
storeerrors "cosmossdk.io/store/v2/errors"
"cosmossdk.io/store/v2/snapshots/types"
@ -38,8 +37,6 @@ type Manager struct {
opts SnapshotOptions
// commitSnapshotter is the snapshotter for the commitment state.
commitSnapshotter CommitSnapshotter
// storageSnapshotter is the snapshotter for the storage state.
storageSnapshotter StorageSnapshotter

logger corelog.Logger

@ -76,17 +73,16 @@ const (
var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")

// NewManager creates a new manager.
func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, storageSnapshotter StorageSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager {
func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager {
if extensions == nil {
extensions = map[string]ExtensionSnapshotter{}
}
return &Manager{
store: store,
opts: opts,
commitSnapshotter: commitSnapshotter,
storageSnapshotter: storageSnapshotter,
extensions: extensions,
logger: logger,
store: store,
opts: opts,
commitSnapshotter: commitSnapshotter,
extensions: extensions,
logger: logger,
}
}

@ -398,23 +394,10 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
return payload.Payload, nil
}

// chStorage is the channel to pass the KV pairs to the storage snapshotter.
chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize)

storageErrs := make(chan error, 1)
go func() {
defer close(storageErrs)
err := m.storageSnapshotter.Restore(snapshot.Height, chStorage)
if err != nil {
storageErrs <- err
}
}()

nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage)
nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader)
if err != nil {
return errorsmod.Wrap(err, "multistore restore")
}
close(chStorage)

for {
if nextItem.Item == nil {
@ -445,11 +428,6 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
}
}

// wait for storage snapshotter to complete
if err := <-storageErrs; err != nil {
return errorsmod.Wrap(err, "storage snapshotter")
}

return nil
}

@ -2,7 +2,6 @@ package snapshots_test

import (
"errors"
"fmt"
"testing"
"time"

@ -19,8 +18,7 @@ var opts = snapshots.NewSnapshotOptions(1500, 2)
func TestManager_List(t *testing.T) {
store := setupStore(t)
commitSnapshotter := &mockCommitSnapshotter{}
storageSnapshotter := &mockStorageSnapshotter{}
manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())

mgrList, err := manager.List()
require.NoError(t, err)
@ -41,7 +39,7 @@ func TestManager_List(t *testing.T) {

func TestManager_LoadChunk(t *testing.T) {
store := setupStore(t)
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger())

// Existing chunk should return body
chunk, err := manager.LoadChunk(2, 1, 1)
@ -73,7 +71,7 @@ func TestManager_Take(t *testing.T) {
extSnapshotter := newExtSnapshotter(10)

expectChunks := snapshotItems(items, extSnapshotter)
manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -112,7 +110,7 @@ func TestManager_Take(t *testing.T) {

func TestManager_Prune(t *testing.T) {
store := setupStore(t)
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, nil, coretesting.NewNopLogger())

pruned, err := manager.Prune(2)
require.NoError(t, err)
@ -131,9 +129,8 @@ func TestManager_Prune(t *testing.T) {
func TestManager_Restore(t *testing.T) {
store := setupStore(t)
target := &mockCommitSnapshotter{}
storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}}
extSnapshotter := newExtSnapshotter(0)
manager := snapshots.NewManager(store, opts, target, storageSnapshotter, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, target, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -206,14 +203,6 @@ func TestManager_Restore(t *testing.T) {
assert.Equal(t, expectItems, target.items)
assert.Equal(t, 10, len(extSnapshotter.state))

// make sure storageSnapshotter items are properly stored
for i, item := range target.items {
key := fmt.Sprintf("key-%d", i)
chunk := storageSnapshotter.items[key]
require.NotNil(t, chunk)
require.Equal(t, item, chunk)
}

// The snapshot is saved in local snapshot store
snapshots, err := store.List()
require.NoError(t, err)
@ -260,7 +249,7 @@ func TestManager_TakeError(t *testing.T) {
snapshotter := &mockErrorCommitSnapshotter{}
store, err := snapshots.NewStore(t.TempDir())
require.NoError(t, err)
manager := snapshots.NewManager(store, opts, snapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, snapshotter, nil, coretesting.NewNopLogger())

_, err = manager.Create(1)
require.Error(t, err)
@ -276,12 +265,11 @@ func TestSnapshot_Take_Restore(t *testing.T) {
commitSnapshotter := &mockCommitSnapshotter{
items: items,
}
storageSnapshotter := &mockStorageSnapshotter{items: map[string][]byte{}}

extSnapshotter := newExtSnapshotter(10)

expectChunks := snapshotItems(items, extSnapshotter)
manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -367,7 +355,7 @@ func TestSnapshot_Take_Prune(t *testing.T) {
extSnapshotter := newExtSnapshotter(10)

expectChunks := snapshotItems(items, extSnapshotter)
manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -446,7 +434,7 @@ func TestSnapshot_Pruning_Take_Snapshot_Parallel(t *testing.T) {
extSnapshotter := newExtSnapshotter(10)

expectChunks := snapshotItems(items, extSnapshotter)
manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, opts, commitSnapshotter, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -518,7 +506,7 @@ func TestSnapshot_SnapshotIfApplicable(t *testing.T) {

snapshotOpts := snapshots.NewSnapshotOptions(1, 1)

manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger())
manager := snapshots.NewManager(store, snapshotOpts, commitSnapshotter, nil, coretesting.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)

@ -3,7 +3,6 @@ package snapshots
import (
protoio "github.com/cosmos/gogoproto/io"

corestore "cosmossdk.io/core/store"
"cosmossdk.io/store/v2/snapshots/types"
)

@ -14,13 +13,7 @@ type CommitSnapshotter interface {
Snapshot(version uint64, protoWriter protoio.Writer) error

// Restore restores the commitment state from the snapshot reader.
Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (types.SnapshotItem, error)
}

// StorageSnapshotter defines an API for restoring snapshots of the storage state.
type StorageSnapshotter interface {
// Restore restores the storage state from the given channel.
Restore(version uint64, chStorage <-chan *corestore.StateChanges) error
Restore(version uint64, format uint32, protoReader protoio.Reader) (types.SnapshotItem, error)
}

// ExtensionPayloadReader read extension payloads,

@ -1,107 +0,0 @@
# State Storage (SS)

The `storage` package contains the state storage (SS) implementation. Specifically,
it contains RocksDB, PebbleDB, and SQLite (Btree) backend implementations of the
`VersionedWriter` interface.

The goal of SS is to provide a modular storage backend, i.e. multiple implementations,
to facilitate storing versioned raw key/value pairs in a fast embedded database,
although an embedded database is not required, i.e. you could use a replicated
RDBMS system.

The responsibility and functions of SS include the following:

* Provide fast and efficient queries for versioned raw key/value pairs
* Provide versioned CRUD operations
* Provide versioned batching functionality
* Provide versioned iteration (forward and reverse) functionality
* Provide pruning functionality

All of the functionality provided by an SS backend should work under a versioned
scheme, i.e. a user should be able to get, store, and iterate over keys for the
latest and historical versions efficiently.
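
As a rough illustration of that contract, the sketch below exercises the versioned
read path against the `Database` interface reproduced later in this diff; the
helper name and the printed output are illustrative only.

```go
package storage_test

import (
	"fmt"

	"cosmossdk.io/store/v2/storage"
)

// readAtLatest is a minimal sketch: resolve the latest version, then do a
// point read and a forward iteration over [start, end) at that version.
func readAtLatest(db storage.Database, storeKey, start, end []byte) error {
	latest, err := db.GetLatestVersion()
	if err != nil {
		return err
	}

	// Point read at the latest version.
	val, err := db.Get(storeKey, latest, start)
	if err != nil {
		return err
	}
	fmt.Printf("%s => %s\n", start, val)

	// Versioned range iteration at the same version.
	it, err := db.Iterator(storeKey, latest, start, end)
	if err != nil {
		return err
	}
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
	return nil
}
```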

## Backends

### RocksDB

The RocksDB implementation is a CGO-based SS implementation. It fully supports
the `VersionedWriter` API and is arguably the most efficient implementation. It
also supports versioning out-of-the-box using User-defined Timestamps in
ColumnFamilies (CF). However, it requires the CGO dependency which can complicate
an app’s build process.

### PebbleDB

The PebbleDB implementation is a native Go SS implementation that is primarily an
alternative to RocksDB. Since it does not support CF, we need to implement
versioning (MVCC) ourselves. This comes with added implementation complexity and
potential performance overhead. However, it is a pure Go implementation and does
not require CGO.
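
To make the trade-off concrete, a toy version of such manual MVCC encoding simply
appends an order-preserving version suffix to each user key; the backend's real
encoding, `MVCCEncode`, appears later in this diff and additionally records a
suffix-length byte. The helper below is a sketch, not the actual scheme used.

```go
package pebbledb

import "encoding/binary"

// naiveVersionedKey is an illustrative sketch: the user key followed by a
// big-endian version, so keys sort by (key, version) and reads at a given
// version become bounded seeks rather than full scans.
func naiveVersionedKey(key []byte, version uint64) []byte {
	out := make([]byte, 0, len(key)+8)
	out = append(out, key...)
	return binary.BigEndian.AppendUint64(out, version)
}
```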

### SQLite (Btree)

The SQLite implementation is another CGO-based SS implementation. It fully supports
the `VersionedWriter` API. The implementation is relatively straightforward and
easy to understand as it’s entirely SQL-based. However, benchmarks show that this
option is the least performant, even for reads. This SS backend has a lot of promise,
but needs more benchmarking and potential SQL optimizations, like dedicated tables
for certain aspects of state, e.g. latest state, to be extremely performant.

## Benchmarks

Benchmarks for basic operations on all supported native SS implementations can
be found in `store/storage/storage_bench_test.go`.

At the time of writing, the following benchmarks were performed:

```shell
name time/op
Get/backend_rocksdb_versiondb_opts-10 7.41µs ± 0%
Get/backend_pebbledb_default_opts-10 6.17µs ± 0%
Get/backend_btree_sqlite-10 29.1µs ± 0%
ApplyChangeset/backend_pebbledb_default_opts-10 5.73ms ± 0%
ApplyChangeset/backend_btree_sqlite-10 56.9ms ± 0%
ApplyChangeset/backend_rocksdb_versiondb_opts-10 4.07ms ± 0%
Iterate/backend_pebbledb_default_opts-10 1.04s ± 0%
Iterate/backend_btree_sqlite-10 1.59s ± 0%
Iterate/backend_rocksdb_versiondb_opts-10 778ms ± 0%
```

## Pruning

Pruning is the process of efficiently managing and removing outdated or redundant
data from the State Storage (SS). To facilitate this, the SS backend must implement
the `Pruner` interface, allowing the `PruningManager` to execute data pruning operations
according to the specified `PruningOption`.
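
A minimal sketch of that control flow, using the `ShouldPrune` and `Prune` shapes
as they appear in the test code earlier in this diff (treat the exact signatures
as assumptions):

```go
package pruning

import store "cosmossdk.io/store/v2"

// pruneIfNeeded is an illustrative sketch: after committing a version, ask
// the PruningOption whether to prune and, if so, prune up to the returned
// height on the backend (any store.Pruner).
func pruneIfNeeded(p store.Pruner, opt *store.PruningOption, committed uint64) error {
	shouldPrune, pruneVersion := opt.ShouldPrune(committed)
	if !shouldPrune {
		return nil
	}
	return p.Prune(pruneVersion)
}
```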

## State Sync

State storage (SS) does not have a direct notion of state sync. Rather, `snapshots.Manager`
is responsible for creating and restoring snapshots of the entire state. The
`snapshots.Manager` has a `StorageSnapshotter` field which is fulfilled by the
`StorageStore` type; specifically, it implements the `Restore` method. The `Restore`
method reads off of a provided channel and writes key/value pairs directly to a
batch object which is committed to the underlying SS engine.
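
A sketch of that `Restore` flow, modeled on the `mockStorageSnapshotter` and the
`Database` interface that appear elsewhere in this diff (committing everything
through one batch is a simplification; a production implementation would flush in
bounded chunks):

```go
package storage

import corestore "cosmossdk.io/core/store"

// restoreSketch drains the channel of state changes produced during snapshot
// restore and writes them to the SS engine through one versioned batch.
func restoreSketch(db Database, version uint64, chStorage <-chan *corestore.StateChanges) error {
	batch, err := db.NewBatch(version)
	if err != nil {
		return err
	}
	for change := range chStorage {
		for _, kv := range change.StateChanges {
			if err := batch.Set(change.Actor, kv.Key, kv.Value); err != nil {
				return err
			}
		}
	}
	return batch.Write()
}
```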

## Non-Consensus Data

<!-- TODO -->

## Usage

An SS backend is meant to be used within a broader store implementation, as it
only stores data for direct and historical query purposes. We define a `Database`
interface in the `storage` package which is meant to represent a `VersionedWriter`
with only the necessary methods. The `StorageStore` interface is meant to wrap or
accept this `Database` type, e.g. RocksDB.

The `StorageStore` interface is an abstraction or wrapper around the backing SS
engine and can be seen as the main entry point to using SS.

Higher up the stack, there should exist a `root.Store` implementation. The `root.Store`
is meant to encapsulate both an SS backend and an SC backend. The SS backend is
defined by this `StorageStore` implementation.

In short, initialize your SS engine of choice and then provide that to `NewStorageStore`
which will further be provided to `root.Store` as the SS backend.
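
As a sketch, this wiring mirrors the pre-refactor `CreateRootStore` shown earlier
in this diff; the nop logger is just a placeholder.

```go
package storage_test

import (
	coretesting "cosmossdk.io/core/testing"

	"cosmossdk.io/store/v2/storage"
	"cosmossdk.io/store/v2/storage/pebbledb"
)

// newSSBackend opens a PebbleDB SS engine and wraps it in a StorageStore;
// the result is what would be handed to root.Store as the SS backend.
func newSSBackend(dir string) (*storage.StorageStore, error) {
	db, err := pebbledb.New(dir)
	if err != nil {
		return nil, err
	}
	return storage.NewStorageStore(db, coretesting.NewNopLogger()), nil
}
```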
@ -1,27 +0,0 @@
package storage

import (
"io"

corestore "cosmossdk.io/core/store"
"cosmossdk.io/store/v2"
)

// Database is an interface that wraps the storage database methods. A wrapper
// is useful for instances where you want to perform logic that is identical for all SS
// backends, such as restoring snapshots.
type Database interface {
NewBatch(version uint64) (store.Batch, error)
Has(storeKey []byte, version uint64, key []byte) (bool, error)
Get(storeKey []byte, version uint64, key []byte) ([]byte, error)
GetLatestVersion() (uint64, error)
SetLatestVersion(version uint64) error
VersionExists(version uint64) (bool, error)

Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)
ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error)

Prune(version uint64) error

io.Closer
}
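
// hasAtLatest is an illustrative sketch, not part of the original file:
// because Database abstracts every SS backend, logic such as a
// latest-version existence check can be written once against the interface.
func hasAtLatest(db Database, storeKey, key []byte) (bool, error) {
	latest, err := db.GetLatestVersion()
	if err != nil {
		return false, err
	}
	return db.Has(storeKey, latest, key)
}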
@ -1,98 +0,0 @@
package pebbledb

import (
"encoding/binary"
"errors"
"fmt"

"github.com/cockroachdb/pebble"

"cosmossdk.io/store/v2"
)

var _ store.Batch = (*Batch)(nil)

type Batch struct {
storage *pebble.DB
batch *pebble.Batch
version uint64
sync bool
size int
}

const (
oneIf64Bit = ^uint(0) >> 63
maxUint32OrInt = (1<<31)<<oneIf64Bit - 1
maxVarintLen32 = 5
)

func keyValueSize(key, value []byte) int {
return len(key) + len(value) + 1 + 2*maxVarintLen32
}

func NewBatch(storage *pebble.DB, version uint64, sync bool) (*Batch, error) {
var versionBz [VersionSize]byte
binary.LittleEndian.PutUint64(versionBz[:], version)

batch := storage.NewBatch()

if err := batch.Set([]byte(latestVersionKey), versionBz[:], nil); err != nil {
return nil, fmt.Errorf("failed to write PebbleDB batch: %w", err)
}

return &Batch{
storage: storage,
batch: batch,
version: version,
sync: sync,
size: keyValueSize([]byte(latestVersionKey), versionBz[:]),
}, nil
}

func (b *Batch) Size() int {
return b.batch.Len()
}

func (b *Batch) Reset() error {
b.batch.Reset()
return nil
}

func (b *Batch) set(storeKey []byte, tombstone uint64, key, value []byte) error {
prefixedKey := MVCCEncode(prependStoreKey(storeKey, key), b.version)
prefixedVal := MVCCEncode(value, tombstone)

size := keyValueSize(prefixedKey, prefixedVal)
if b.size+size > maxUint32OrInt {
// 4 GB is huge, probably genesis; flush and reset
if err := b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}); err != nil {
return fmt.Errorf("max batch size exceed: failed to write PebbleDB batch: %w", err)
}
b.batch.Reset()
b.size = 0
}

if err := b.batch.Set(prefixedKey, prefixedVal, nil); err != nil {
return fmt.Errorf("failed to write PebbleDB batch: %w", err)
}
b.size += size

return nil
}

func (b *Batch) Set(storeKey, key, value []byte) error {
return b.set(storeKey, 0, key, value)
}

func (b *Batch) Delete(storeKey, key []byte) error {
return b.set(storeKey, b.version, key, []byte(tombstoneVal))
}

// Write flushes any accumulated data to disk and closes the batch.
func (b *Batch) Write() (err error) {
defer func() {
err = errors.Join(err, b.batch.Close())
}()

return b.batch.Commit(&pebble.WriteOptions{Sync: b.sync})
}
@ -1,242 +0,0 @@
package pebbledb

import (
"bytes"
"encoding/binary"
"fmt"

"github.com/cockroachdb/pebble"
)

// MVCCComparer returns a PebbleDB Comparer with encoding and decoding routines
// for MVCC control, used to compare and store versioned keys.
//
// Note: This Comparer implementation is largely based on PebbleDB's internal
// MVCC example, which can be found here:
// https://github.com/cockroachdb/pebble/blob/master/cmd/pebble/mvcc.go
var MVCCComparer = &pebble.Comparer{
Name: "ss_pebbledb_comparator",

Compare: MVCCKeyCompare,

AbbreviatedKey: func(k []byte) uint64 {
key, _, ok := SplitMVCCKey(k)
if !ok {
return 0
}

return pebble.DefaultComparer.AbbreviatedKey(key)
},

Equal: func(a, b []byte) bool {
return MVCCKeyCompare(a, b) == 0
},

Separator: func(dst, a, b []byte) []byte {
aKey, _, ok := SplitMVCCKey(a)
if !ok {
return append(dst, a...)
}

bKey, _, ok := SplitMVCCKey(b)
if !ok {
return append(dst, a...)
}

// if the keys are the same just return a
if bytes.Equal(aKey, bKey) {
return append(dst, a...)
}

n := len(dst)

// MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the
// same semantics as pebble.DefaultComparer, so reuse the latter's Separator
// implementation.
dst = pebble.DefaultComparer.Separator(dst, aKey, bKey)

// Did we pick a separator different than aKey? If we did not, we can't do
// better than a.
buf := dst[n:]
if bytes.Equal(aKey, buf) {
return append(dst[:n], a...)
}

// The separator is > aKey, so we only need to add the timestamp sentinel.
return append(dst, 0)
},

ImmediateSuccessor: func(dst, a []byte) []byte {
// The key `a` is guaranteed to be a bare prefix: It's a key without a version
// — just a trailing 0-byte to signify the length of the version. For example
// the user key "foo" is encoded as: "foo\0". We need to encode the immediate
// successor to "foo", which in the natural byte ordering is "foo\0". Append
// a single additional zero, to encode the user key "foo\0" with a zero-length
// version.
return append(append(dst, a...), 0)
},

Successor: func(dst, a []byte) []byte {
aKey, _, ok := SplitMVCCKey(a)
if !ok {
return append(dst, a...)
}

n := len(dst)

// MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the
// same semantics as pebble.DefaultComparer, so reuse the latter's Successor
// implementation.
dst = pebble.DefaultComparer.Successor(dst, aKey)

// Did we pick a successor different than aKey? If we did not, we can't do
// better than a.
buf := dst[n:]
if bytes.Equal(aKey, buf) {
return append(dst[:n], a...)
}

// The successor is > aKey, so we only need to add the timestamp sentinel.
return append(dst, 0)
},

FormatKey: func(k []byte) fmt.Formatter {
return mvccKeyFormatter{key: k}
},

Split: func(k []byte) int {
key, _, ok := SplitMVCCKey(k)
if !ok {
return len(k)
}

// This matches the behavior of libroach/KeyPrefix. RocksDB requires that
// keys generated via a SliceTransform be comparable with normal encoded
// MVCC keys. Encoded MVCC keys have a suffix indicating the number of
// bytes of timestamp data. MVCC keys without a timestamp have a suffix of
// 0. We're careful in EncodeKey to make sure that the user-key always has
// a trailing 0. If there is no timestamp this falls out naturally. If
// there is a timestamp we prepend a 0 to the encoded timestamp data.
return len(key) + 1
},
}

type mvccKeyFormatter struct {
key []byte
}

func (f mvccKeyFormatter) Format(s fmt.State, verb rune) {
k, vBz, ok := SplitMVCCKey(f.key)
if ok {
v, _ := decodeUint64Ascending(vBz)
fmt.Fprintf(s, "%s/%d", k, v)
} else {
fmt.Fprintf(s, "%s", f.key)
}
}

// SplitMVCCKey accepts an MVCC key and returns the "user" key, the MVCC version,
// and a boolean indicating if the provided key is an MVCC key.
//
// Note, internally, we must make a copy of the provided mvccKey argument, which
// typically comes from the Key() method as it's not safe.
func SplitMVCCKey(mvccKey []byte) (key, version []byte, ok bool) {
if len(mvccKey) == 0 {
return nil, nil, false
}

mvccKeyCopy := bytes.Clone(mvccKey)

n := len(mvccKeyCopy) - 1
tsLen := int(mvccKeyCopy[n])
if n < tsLen {
return nil, nil, false
}

key = mvccKeyCopy[:n-tsLen]
if tsLen > 0 {
version = mvccKeyCopy[n-tsLen+1 : n]
}

return key, version, true
}

// MVCCKeyCompare compares two MVCC keys.
func MVCCKeyCompare(a, b []byte) int {
aEnd := len(a) - 1
bEnd := len(b) - 1
if aEnd < 0 || bEnd < 0 {
// This should never happen unless there is some sort of corruption of
// the keys. This is a little bizarre, but the behavior exactly matches
// engine/db.cc:DBComparator.
return bytes.Compare(a, b)
}

// Compute the index of the separator between the key and the timestamp.
aSep := aEnd - int(a[aEnd])
bSep := bEnd - int(b[bEnd])
if aSep < 0 || bSep < 0 {
// This should never happen unless there is some sort of corruption of
// the keys. This is a little bizarre, but the behavior exactly matches
// engine/db.cc:DBComparator.
return bytes.Compare(a, b)
}

// compare the "user key" part of the key
if c := bytes.Compare(a[:aSep], b[:bSep]); c != 0 {
return c
}

// compare the timestamp part of the key
aTS := a[aSep:aEnd]
bTS := b[bSep:bEnd]
if len(aTS) == 0 {
if len(bTS) == 0 {
return 0
}
return -1
} else if len(bTS) == 0 {
return 1
}

return bytes.Compare(aTS, bTS)
}

// MVCCEncode encodes a key and version into an MVCC format.
// The format is: <key>\x00[<version>]<#version-bytes>
// If the version is 0, only the key and a null byte are encoded.
func MVCCEncode(key []byte, version uint64) (dst []byte) {
dst = append(dst, key...)
dst = append(dst, 0)

if version != 0 {
extra := byte(1 + 8)
dst = encodeUint64Ascending(dst, version)
dst = append(dst, extra)
}

return dst
}
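
// Worked example (illustrative, not part of the original file): for the user
// key "foo" at version 2, MVCCEncode produces
//
//	"foo" ++ 0x00 ++ big-endian(2) ++ 0x09
//
// where the trailing 0x09 records that nine suffix bytes (one null separator
// plus eight version bytes) follow the user key. SplitMVCCKey reads that
// length byte back to recover the parts:
//
//	k := MVCCEncode([]byte("foo"), 2)
//	key, vBz, ok := SplitMVCCKey(k)    // key = "foo", ok = true
//	v, _ := decodeUint64Ascending(vBz) // v = 2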
|
||||
|
||||
// encodeUint64Ascending encodes the uint64 value using a big-endian 8 byte
|
||||
// representation. The bytes are appended to the supplied buffer and
|
||||
// the final buffer is returned.
|
||||
func encodeUint64Ascending(dst []byte, v uint64) []byte {
|
||||
return append(
|
||||
dst,
|
||||
byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32),
|
||||
byte(v>>24), byte(v>>16), byte(v>>8), byte(v),
|
||||
)
|
||||
}
|
||||
|
||||
// decodeUint64Ascending decodes a uint64 from the input buffer, treating
|
||||
// the input as a big-endian 8 byte uint64 representation. The decoded uint64 is
|
||||
// returned.
|
||||
func decodeUint64Ascending(b []byte) (uint64, error) {
|
||||
if len(b) < 8 {
|
||||
return 0, fmt.Errorf("insufficient bytes to decode uint64 int value; expected 8; got %d", len(b))
|
||||
}
|
||||
|
||||
v := binary.BigEndian.Uint64(b)
|
||||
return v, nil
|
||||
}
|
||||
@ -1,58 +0,0 @@
package pebbledb

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestMVCCKey(t *testing.T) {
	for i := uint64(1); i < 1001; i++ {
		keyA := MVCCEncode([]byte("key001"), i)

		key, vBz, ok := SplitMVCCKey(keyA)

		version, err := decodeUint64Ascending(vBz)
		require.NoError(t, err)
		require.True(t, ok)
		require.Equal(t, i, version)
		require.Equal(t, []byte("key001"), key)
	}
}

func TestMVCCKeyCompare(t *testing.T) {
	testCases := []struct {
		keyA     []byte
		keyB     []byte
		expected int
	}{
		{
			// same key, same version
			keyA:     MVCCEncode([]byte("key001"), 1),
			keyB:     MVCCEncode([]byte("key001"), 1),
			expected: 0,
		},
		{
			// same key, different version
			keyA:     MVCCEncode([]byte("key001"), 1),
			keyB:     MVCCEncode([]byte("key001"), 2),
			expected: -1,
		},
		{
			// same key, different version (inverse)
			keyA:     MVCCEncode([]byte("key001"), 2),
			keyB:     MVCCEncode([]byte("key001"), 1),
			expected: 1,
		},
		{
			// different key, same version
			keyA:     MVCCEncode([]byte("key001"), 1),
			keyB:     MVCCEncode([]byte("key009"), 1),
			expected: -1,
		},
	}

	for _, tc := range testCases {
		require.Equalf(t, tc.expected, MVCCKeyCompare(tc.keyA, tc.keyB), "keyA: %s, keyB: %s", tc.keyA, tc.keyB)
	}
}
@ -1,528 +0,0 @@
package pebbledb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"slices"

	"github.com/cockroachdb/pebble"

	corestore "cosmossdk.io/core/store"
	"cosmossdk.io/store/v2"
	storeerrors "cosmossdk.io/store/v2/errors"
	"cosmossdk.io/store/v2/internal/encoding"
	"cosmossdk.io/store/v2/storage"
	"cosmossdk.io/store/v2/storage/util"
)

const (
	VersionSize = 8
	// PruneCommitBatchSize defines the size, in number of key/value pairs, to prune
	// in a single batch.
	PruneCommitBatchSize = 50
	// batchBufferSize defines the maximum size of a batch before it is committed.
	batchBufferSize = 100_000

	StorePrefixTpl        = "s/k:%s/"         // s/k:<storeKey>
	removedStoreKeyPrefix = "s/_removed_key"  // NB: removedStoreKeys key must be lexically smaller than StorePrefixTpl
	latestVersionKey      = "s/_latest"       // NB: latestVersionKey key must be lexically smaller than StorePrefixTpl
	pruneHeightKey        = "s/_prune_height" // NB: pruneHeightKey key must be lexically smaller than StorePrefixTpl
	tombstoneVal          = "TOMBSTONE"
)

var (
	_ storage.Database         = (*Database)(nil)
	_ store.UpgradableDatabase = (*Database)(nil)
)

type Database struct {
	storage *pebble.DB

	// earliestVersion defines the earliest version set in the database, which is
	// only updated when the database is pruned.
	earliestVersion uint64

	// sync is whether to sync writes through the OS buffer cache and down onto
	// the actual disk, if applicable. Setting sync is required for durability of
	// individual write operations but can result in slower writes.
	//
	// If false, and the process or machine crashes, then a recent write may be
	// lost. This is due to the recently written data being buffered inside the
	// process running Pebble. This differs from the semantics of a write system
	// call in which the data is buffered in the OS buffer cache and would thus
	// survive a process crash.
	sync bool
}

func New(dataDir string) (*Database, error) {
	opts := &pebble.Options{
		Comparer: MVCCComparer,
	}
	opts = opts.EnsureDefaults()

	db, err := pebble.Open(dataDir, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to open PebbleDB: %w", err)
	}

	earliestVersion, err := getEarliestVersion(db)
	if err != nil {
		return nil, fmt.Errorf("failed to get the earliest version: %w", err)
	}

	return &Database{
		storage:         db,
		earliestVersion: earliestVersion,
		sync:            true,
	}, nil
}

func NewWithDB(storage *pebble.DB, sync bool) *Database {
	earliestVersion, err := getEarliestVersion(storage)
	if err != nil {
		panic(fmt.Errorf("failed to get the earliest version: %w", err))
	}

	return &Database{
		storage:         storage,
		earliestVersion: earliestVersion,
		sync:            sync,
	}
}

func (db *Database) SetSync(sync bool) {
	db.sync = sync
}

func (db *Database) Close() error {
	err := db.storage.Close()
	db.storage = nil
	return err
}

func (db *Database) NewBatch(version uint64) (store.Batch, error) {
	b, err := NewBatch(db.storage, version, db.sync)
	if err != nil {
		return nil, err
	}

	return b, nil
}

func (db *Database) SetLatestVersion(version uint64) error {
	var ts [VersionSize]byte
	binary.LittleEndian.PutUint64(ts[:], version)

	return db.storage.Set([]byte(latestVersionKey), ts[:], &pebble.WriteOptions{Sync: db.sync})
}

func (db *Database) GetLatestVersion() (uint64, error) {
	bz, closer, err := db.storage.Get([]byte(latestVersionKey))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			// in case of a fresh database
			return 0, nil
		}

		return 0, err
	}

	if len(bz) == 0 {
		return 0, closer.Close()
	}

	return binary.LittleEndian.Uint64(bz), closer.Close()
}

func (db *Database) VersionExists(version uint64) (bool, error) {
	latestVersion, err := db.GetLatestVersion()
	if err != nil {
		return false, err
	}

	return latestVersion >= version && version >= db.earliestVersion, nil
}

func (db *Database) setPruneHeight(pruneVersion uint64) error {
	db.earliestVersion = pruneVersion + 1

	var ts [VersionSize]byte
	binary.LittleEndian.PutUint64(ts[:], pruneVersion)

	return db.storage.Set([]byte(pruneHeightKey), ts[:], &pebble.WriteOptions{Sync: db.sync})
}

func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
	val, err := db.Get(storeKey, version, key)
	if err != nil {
		return false, err
	}

	return val != nil, nil
}

func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) {
	if targetVersion < db.earliestVersion {
		return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion}
	}

	prefixedVal, err := getMVCCSlice(db.storage, storeKey, key, targetVersion)
	if err != nil {
		if errors.Is(err, storeerrors.ErrRecordNotFound) {
			return nil, nil
		}

		return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err)
	}

	valBz, tombBz, ok := SplitMVCCKey(prefixedVal)
	if !ok {
		return nil, fmt.Errorf("invalid PebbleDB MVCC value: %s", prefixedVal)
	}

	// An empty tombstone suffix means the key has never been deleted, so the
	// value is returned as-is.
	if len(tombBz) == 0 {
		return valBz, nil
	}

	tombstone, err := decodeUint64Ascending(tombBz)
	if err != nil {
		return nil, fmt.Errorf("failed to decode value tombstone: %w", err)
	}

	// A target version that is less than the tombstone version means the key is
	// not deleted at the target version.
	if targetVersion < tombstone {
		return valBz, nil
	}

	// the value is considered deleted
	return nil, nil
}
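
// A sketch of the resulting read semantics (illustrative, assuming a key
// written at version 5 and deleted at version 9, so the latest entry carries
// tombstone 9):
//
//	val, _ := db.Get(storeKey, 8, key) // val == the version-5 value (8 < tombstone)
//	val, _ = db.Get(storeKey, 9, key)  // val == nil (deleted at the target version)
//	val, _ = db.Get(storeKey, 4, key)  // val == nil (no entry exists at version <= 4)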

// Prune removes all versions of all keys that are <= the given version.
//
// Note, the implementation of this method is inefficient and can be potentially
// time consuming given the size of the database and when the last pruning occurred
// (if any). This is because the implementation iterates over all keys in the
// database in order to delete them.
//
// See: https://github.com/cockroachdb/cockroach/blob/33623e3ee420174a4fd3226d1284b03f0e3caaac/pkg/storage/mvcc.go#L3182
func (db *Database) Prune(version uint64) (err error) {
	itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte("s/k:")})
	if err != nil {
		return err
	}
	defer itr.Close()

	batch := db.storage.NewBatch()
	defer func() {
		err = errors.Join(err, batch.Close())
	}()

	var (
		batchCounter                              int
		prevKey, prevKeyPrefixed, prevPrefixedVal []byte
		prevKeyVersion                            uint64
	)

	for itr.First(); itr.Valid(); {
		prefixedKey := slices.Clone(itr.Key())

		keyBz, verBz, ok := SplitMVCCKey(prefixedKey)
		if !ok {
			return fmt.Errorf("invalid PebbleDB MVCC key: %s", prefixedKey)
		}

		var keyVersion uint64
		// handle version 0 (no version prefix)
		if len(verBz) > 0 {
			keyVersion, err = decodeUint64Ascending(verBz)
			if err != nil {
				return fmt.Errorf("failed to decode key version: %w", err)
			}
		}
		// seek to the next key if we are at a version which is higher than the prune height
		if keyVersion > version {
			itr.NextPrefix()
			continue
		}

		// Delete a key if another entry for that key exists at a larger version
		// than the original but <= to the prune height. We also delete a key if
		// it has been tombstoned and its version is <= to the prune height.
		if prevKeyVersion <= version && (bytes.Equal(prevKey, keyBz) || valTombstoned(prevPrefixedVal)) {
			if err := batch.Delete(prevKeyPrefixed, nil); err != nil {
				return err
			}

			batchCounter++
			if batchCounter >= PruneCommitBatchSize {
				if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil {
					return err
				}

				batchCounter = 0
				batch.Reset()
			}
		}

		prevKey = keyBz
		prevKeyVersion = keyVersion
		prevKeyPrefixed = prefixedKey
		value, err := itr.ValueAndErr()
		if err != nil {
			return err
		}
		prevPrefixedVal = slices.Clone(value)

		itr.Next()
	}

	// commit any leftover delete ops in batch
	if batchCounter > 0 {
		if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil {
			return err
		}
	}

	if err := db.deleteRemovedStoreKeys(version); err != nil {
		return err
	}

	return db.setPruneHeight(version)
}

func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, storeerrors.ErrKeyEmpty
	}

	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
		return nil, storeerrors.ErrStartAfterEnd
	}

	lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0)

	var upperBound []byte
	if end != nil {
		upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0)
	}

	itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound})
	if err != nil {
		return nil, err
	}

	return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, false), nil
}

func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, storeerrors.ErrKeyEmpty
	}

	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
		return nil, storeerrors.ErrStartAfterEnd
	}

	lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0)

	var upperBound []byte
	if end != nil {
		upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0)
	}

	itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound})
	if err != nil {
		return nil, err
	}

	return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, true), nil
}

func (db *Database) PruneStoreKeys(storeKeys []string, version uint64) (err error) {
	batch := db.storage.NewBatch()
	defer func() {
		err = errors.Join(err, batch.Close())
	}()

	for _, storeKey := range storeKeys {
		if err := batch.Set([]byte(fmt.Sprintf("%s%s", encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version), storeKey)), []byte{}, nil); err != nil {
			return err
		}
	}

	return batch.Commit(&pebble.WriteOptions{Sync: db.sync})
}

func storePrefix(storeKey []byte) []byte {
	return []byte(fmt.Sprintf(StorePrefixTpl, storeKey))
}

func prependStoreKey(storeKey, key []byte) []byte {
	return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key))
}
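
// Putting the helpers together (illustrative sketch): a record for key "key001"
// in store "store1" written at version 7 lives under the fully prefixed MVCC key
//
//	"s/k:store1/key001" + 0x00 + big-endian(7) + 0x09
//
// which keeps every store's keyspace inside its own "s/k:<storeKey>/" range,
// while the reserved "s/_..." metadata keys sort before all of them.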

// getEarliestVersion returns the earliest version set in the database.
// It is calculated by prune height + 1. If the prune height is not set, it
// returns 0.
func getEarliestVersion(storage *pebble.DB) (uint64, error) {
	bz, closer, err := storage.Get([]byte(pruneHeightKey))
	if err != nil {
		if errors.Is(err, pebble.ErrNotFound) {
			// in cases where pruning was never triggered
			return 0, nil
		}

		return 0, err
	}

	if len(bz) == 0 {
		return 0, closer.Close()
	}

	return binary.LittleEndian.Uint64(bz) + 1, closer.Close()
}

func valTombstoned(value []byte) bool {
	if value == nil {
		return false
	}

	_, tombBz, ok := SplitMVCCKey(value)
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC value.
		panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", value))
	}

	// If the tombstone suffix is empty, we consider this a zero value and thus it
	// is not tombstoned.
	if len(tombBz) == 0 {
		return false
	}

	return true
}

func getMVCCSlice(db *pebble.DB, storeKey, key []byte, version uint64) ([]byte, error) {
	// end domain is exclusive, so we need to increment the version by 1
	if version < math.MaxUint64 {
		version++
	}

	itr, err := db.NewIter(&pebble.IterOptions{
		LowerBound: MVCCEncode(prependStoreKey(storeKey, key), 0),
		UpperBound: MVCCEncode(prependStoreKey(storeKey, key), version),
	})
	if err != nil {
		return nil, err
	}

	defer itr.Close()

	if !itr.Last() {
		return nil, storeerrors.ErrRecordNotFound
	}

	_, vBz, ok := SplitMVCCKey(itr.Key())
	if !ok {
		return nil, fmt.Errorf("invalid PebbleDB MVCC key: %s", itr.Key())
	}

	var keyVersion uint64
	// handle version 0 (no version prefix)
	if len(vBz) > 0 {
		keyVersion, err = decodeUint64Ascending(vBz)
		if err != nil {
			return nil, fmt.Errorf("failed to decode key version: %w", err)
		}
	}
	if keyVersion > version {
		return nil, fmt.Errorf("key version too large: %d", keyVersion)
	}

	value, err := itr.ValueAndErr()
	return slices.Clone(value), err
}

func (db *Database) deleteRemovedStoreKeys(version uint64) (err error) {
	batch := db.storage.NewBatch()
	defer func() {
		err = errors.Join(err, batch.Close())
	}()

	end := encoding.BuildPrefixWithVersion(removedStoreKeyPrefix, version+1)
	storeKeyIter, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte(removedStoreKeyPrefix), UpperBound: end})
	if err != nil {
		return err
	}
	defer storeKeyIter.Close()

	storeKeys := make(map[string]uint64)
	prefixLen := len(end)
	for storeKeyIter.First(); storeKeyIter.Valid(); storeKeyIter.Next() {
		verBz := storeKeyIter.Key()[len(removedStoreKeyPrefix):prefixLen]
		v, err := decodeUint64Ascending(verBz)
		if err != nil {
			return err
		}
		storeKey := string(storeKeyIter.Key()[prefixLen:])
		if ev, ok := storeKeys[storeKey]; ok {
			if ev < v {
				storeKeys[storeKey] = v
			}
		} else {
			storeKeys[storeKey] = v
		}
		if err := batch.Delete(storeKeyIter.Key(), nil); err != nil {
			return err
		}
	}

	for storeKey, v := range storeKeys {
		if err := func() error {
			storeKey := []byte(storeKey)
			itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: storePrefix(storeKey), UpperBound: storePrefix(util.CopyIncr(storeKey))})
			if err != nil {
				return err
			}
			defer itr.Close()

			for itr.First(); itr.Valid(); itr.Next() {
				itrKey := itr.Key()
				_, verBz, ok := SplitMVCCKey(itrKey)
				if !ok {
					return fmt.Errorf("invalid PebbleDB MVCC key: %s", itrKey)
				}
				keyVersion, err := decodeUint64Ascending(verBz)
				if err != nil {
					return err
				}
				if keyVersion > v {
					// skip keys that are newer than the version
					continue
				}
				if err := batch.Delete(itr.Key(), nil); err != nil {
					return err
				}
				if batch.Len() >= batchBufferSize {
					if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil {
						return err
					}
					batch.Reset()
				}
			}
			return nil
		}(); err != nil {
			return err
		}
	}

	return batch.Commit(&pebble.WriteOptions{Sync: true})
}
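
// End-to-end pruning behavior (illustrative sketch): after Prune(n) the earliest
// readable version becomes n+1, so historical reads at or below n fail fast:
//
//	_ = db.Prune(100)
//	_, err := db.Get(storeKey, 100, key)
//	// err is storeerrors.ErrVersionPruned{EarliestVersion: 101, RequestedVersion: 100}
//	ok, _ := db.VersionExists(100) // ok == false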
@ -1,28 +0,0 @@
package pebbledb

import (
	"testing"

	"github.com/stretchr/testify/suite"

	coretesting "cosmossdk.io/core/testing"
	"cosmossdk.io/store/v2/storage"
)

func TestStorageTestSuite(t *testing.T) {
	s := &storage.StorageTestSuite{
		NewDB: func(dir string) (*storage.StorageStore, error) {
			db, err := New(dir)
			if err == nil && db != nil {
				// We set sync=false just to speed up CI tests. Operators should weigh
				// this setting carefully in production environments.
				db.SetSync(false)
			}

			return storage.NewStorageStore(db, coretesting.NewNopLogger()), err
		},
		EmptyBatchSize: 12,
	}

	suite.Run(t, s)
}
@ -1,437 +0,0 @@
package pebbledb

import (
	"bytes"
	"fmt"
	"slices"

	"github.com/cockroachdb/pebble"

	corestore "cosmossdk.io/core/store"
)

var _ corestore.Iterator = (*iterator)(nil)

// iterator implements the store.Iterator interface. It wraps a PebbleDB iterator
// with added MVCC key handling logic. The iterator will iterate over the key space
// in the provided domain for a given version. If a key has been written at the
// provided version, that key/value pair will be iterated over. Otherwise, the
// latest version for that key/value pair will be iterated over s.t. it's less
// than the provided version.
type iterator struct {
	source             *pebble.Iterator
	prefix, start, end []byte
	version            uint64
	valid              bool
	reverse            bool
}

func newPebbleDBIterator(src *pebble.Iterator, prefix, mvccStart, mvccEnd []byte, version, earliestVersion uint64, reverse bool) *iterator {
	if version < earliestVersion {
		return &iterator{
			source:  src,
			prefix:  prefix,
			start:   mvccStart,
			end:     mvccEnd,
			version: version,
			valid:   false,
			reverse: reverse,
		}
	}

	// move the underlying PebbleDB iterator to the first key
	var valid bool
	if reverse {
		valid = src.Last()
	} else {
		valid = src.First()
	}

	itr := &iterator{
		source:  src,
		prefix:  prefix,
		start:   mvccStart,
		end:     mvccEnd,
		version: version,
		valid:   valid,
		reverse: reverse,
	}

	if valid {
		currKey, currKeyVersion, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			// XXX: This should not happen as that would indicate we have a malformed
			// MVCC key.
			panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
		}

		curKeyVersionDecoded, err := decodeUint64Ascending(currKeyVersion)
		if err != nil {
			itr.valid = false
			return itr
		}

		// We need to check whether the version of the initial key the iterator
		// visits is <= the requested version. If the version is larger, call Next
		// to find another key which satisfies it.
		if curKeyVersionDecoded > itr.version {
			itr.Next()
		} else {
			// If the version is less, seek to the largest version of that key <= the
			// requested iterator version. It is guaranteed this won't move the iterator
			// to a key that is invalid since curKeyVersionDecoded <= requested iterator
			// version, so there exists at least one version of currKey SeekLT may move to.
			itr.valid = itr.source.SeekLT(MVCCEncode(currKey, itr.version+1))
		}

		// The cursor might now be pointing at a key/value pair that is tombstoned.
		// If so, we must move the cursor.
		if itr.valid && itr.cursorTombstoned() {
			itr.Next()
		}
	}
	return itr
}
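
// Behavior sketch (illustrative): given writes k1@1, k1@3 and k2@2, an iterator
// constructed at version 2 yields k1 with its version-1 value (the largest
// version <= 2, since k1@3 is too new) and k2 with its version-2 value; any
// entry whose tombstone is <= 2 is skipped as deleted:
//
//	itr, _ := db.Iterator(storeKey, 2, nil, nil)
//	for ; itr.Valid(); itr.Next() {
//		_ = itr.Key()   // k1, then k2
//		_ = itr.Value() // value written at version 1, then value at version 2
//	}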

// Domain returns the domain of the iterator. The caller must not modify the
// return values.
func (itr *iterator) Domain() ([]byte, []byte) {
	return itr.start, itr.end
}

func (itr *iterator) Key() []byte {
	itr.assertIsValid()

	key, _, ok := SplitMVCCKey(itr.source.Key())
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC key.
		panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
	}

	keyCopy := slices.Clone(key)
	return keyCopy[len(itr.prefix):]
}

func (itr *iterator) Value() []byte {
	itr.assertIsValid()

	val, _, ok := SplitMVCCKey(itr.source.Value())
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC value.
		panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value()))
	}

	return slices.Clone(val)
}

func (itr *iterator) Next() {
	if itr.reverse {
		itr.nextReverse()
	} else {
		itr.nextForward()
	}
}

func (itr *iterator) Valid() bool {
	// once invalid, forever invalid
	if !itr.valid || !itr.source.Valid() {
		itr.valid = false
		return itr.valid
	}

	// if the source has an error, consider it invalid
	if err := itr.source.Error(); err != nil {
		itr.valid = false
		return itr.valid
	}

	// if the key is at the end or past it, consider it invalid
	if end := itr.end; end != nil {
		if bytes.Compare(end, itr.Key()) <= 0 {
			itr.valid = false
			return itr.valid
		}
	}

	return true
}

func (itr *iterator) Error() error {
	return itr.source.Error()
}

func (itr *iterator) Close() error {
	err := itr.source.Close()
	itr.source = nil
	itr.valid = false

	return err
}

func (itr *iterator) assertIsValid() {
	if !itr.valid {
		panic("iterator is invalid")
	}
}

// cursorTombstoned checks if the current cursor is pointing at a key/value pair
// that is tombstoned. If the cursor is tombstoned, <true> is returned, otherwise
// <false> is returned. In the case where the iterator is valid but the key/value
// pair is tombstoned, the caller should call Next(). Note, this method assumes
// the caller assures the iterator is valid first!
func (itr *iterator) cursorTombstoned() bool {
	_, tombBz, ok := SplitMVCCKey(itr.source.Value())
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC value.
		panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value()))
	}

	// If the tombstone suffix is empty, we consider this a zero value and thus it
	// is not tombstoned.
	if len(tombBz) == 0 {
		return false
	}

	// If the tombstone suffix is non-empty and greater than the target version,
	// the value is not tombstoned.
	tombstone, err := decodeUint64Ascending(tombBz)
	if err != nil {
		panic(fmt.Errorf("failed to decode value tombstone: %w", err))
	}
	if tombstone > itr.version {
		return false
	}

	return true
}

func (itr *iterator) DebugRawIterate() {
	valid := itr.source.Valid()
	if valid {
		// The first key may not represent the desired target version, so move the
		// cursor to the correct location.
		firstKey, _, _ := SplitMVCCKey(itr.source.Key())
		valid = itr.source.SeekLT(MVCCEncode(firstKey, itr.version+1))
	}

	var err error
	for valid {
		key, vBz, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
		}

		var version uint64
		// handle version 0 (no version prefix)
		if len(vBz) > 0 {
			version, err = decodeUint64Ascending(vBz)
			if err != nil {
				panic(fmt.Errorf("failed to decode key version: %w", err))
			}
		}

		val, tombBz, ok := SplitMVCCKey(itr.source.Value())
		if !ok {
			panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value()))
		}

		var tombstone uint64
		if len(tombBz) > 0 {
			// decode the tombstone suffix, not the key version bytes
			tombstone, err = decodeUint64Ascending(tombBz)
			if err != nil {
				panic(fmt.Errorf("failed to decode value tombstone: %w", err))
			}
		}

		fmt.Printf("KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", key, val, version, tombstone)

		var next bool
		if itr.reverse {
			next = itr.source.SeekLT(MVCCEncode(key, 0))
		} else {
			next = itr.source.NextPrefix()
		}

		if next {
			nextKey, _, ok := SplitMVCCKey(itr.source.Key())
			if !ok {
				panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
			}

			// the next key must have itr.prefix as the prefix
			if !bytes.HasPrefix(nextKey, itr.prefix) {
				valid = false
			} else {
				valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))
			}
		} else {
			valid = false
		}
	}
}

func (itr *iterator) nextForward() {
	if !itr.source.Valid() {
		itr.valid = false
		return
	}

	currKey, _, ok := SplitMVCCKey(itr.source.Key())
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC key.
		panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
	}

	next := itr.source.NextPrefix()

	// First move the iterator to the next prefix, which may not correspond to the
	// desired version for that key, e.g. if the key was written at a later version,
	// so we seek back to the latest desired version, s.t. the version is <= itr.version.
	if next {
		nextKey, _, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			// XXX: This should not happen as that would indicate we have a malformed
			// MVCC key.
			itr.valid = false
			return
		}

		if !bytes.HasPrefix(nextKey, itr.prefix) {
			// the next key must have itr.prefix as the prefix
			itr.valid = false
			return
		}

		// Move the iterator to the closest version to the desired version, so we
		// append the current iterator key to the prefix and seek to that key.
		itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))

		tmpKey, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			// XXX: This should not happen as that would indicate we have a malformed
			// MVCC key.
			itr.valid = false
			return
		}

		// There exist cases where the SeekLT() call moved us back to the same key
		// we started at, so we must move to the next key, i.e. two keys forward.
		if bytes.Equal(tmpKey, currKey) {
			if itr.source.NextPrefix() {
				itr.nextForward()

				_, tmpKeyVersion, ok = SplitMVCCKey(itr.source.Key())
				if !ok {
					// XXX: This should not happen as that would indicate we have a malformed
					// MVCC key.
					itr.valid = false
					return
				}
			} else {
				itr.valid = false
				return
			}
		}

		// We need to verify that every Next call either moves the iterator to a key
		// whose version is less than or equal to the requested iterator version, or
		// exhausts the iterator.
		tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion)
		if err != nil {
			itr.valid = false
			return
		}

		// If the iterator is at an entry whose version is higher than the requested
		// version, call nextForward again.
		if tmpKeyVersionDecoded > itr.version {
			itr.nextForward()
		}

		// The cursor might now be pointing at a key/value pair that is tombstoned.
		// If so, we must move the cursor.
		if itr.valid && itr.cursorTombstoned() {
			itr.nextForward()
		}

		return
	}

	itr.valid = false
}

func (itr *iterator) nextReverse() {
	if !itr.source.Valid() {
		itr.valid = false
		return
	}

	currKey, _, ok := SplitMVCCKey(itr.source.Key())
	if !ok {
		// XXX: This should not happen as that would indicate we have a malformed
		// MVCC key.
		panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
	}

	next := itr.source.SeekLT(MVCCEncode(currKey, 0))

	// First move the iterator to the previous prefix, which may not correspond to
	// the desired version for that key, e.g. if the key was written at a later
	// version, so we seek back to the latest desired version, s.t. the version is
	// <= itr.version.
	if next {
		nextKey, _, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			// XXX: This should not happen as that would indicate we have a malformed
			// MVCC key.
			itr.valid = false
			return
		}

		if !bytes.HasPrefix(nextKey, itr.prefix) {
			// the next key must have itr.prefix as the prefix
			itr.valid = false
			return
		}

		// Move the iterator to the closest version to the desired version, so we
		// append the current iterator key to the prefix and seek to that key.
		itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))

		_, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key())
		if !ok {
			// XXX: This should not happen as that would indicate we have a malformed
			// MVCC key.
			itr.valid = false
			return
		}

		// We need to verify that every Next call either moves the iterator to a key
		// whose version is less than or equal to the requested iterator version, or
		// exhausts the iterator.
		tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion)
		if err != nil {
			itr.valid = false
			return
		}

		// If the iterator is at an entry whose version is higher than the requested
		// version, call nextReverse again.
		if tmpKeyVersionDecoded > itr.version {
			itr.nextReverse()
		}

		// The cursor might now be pointing at a key/value pair that is tombstoned.
		// If so, we must move the cursor.
		if itr.valid && itr.cursorTombstoned() {
			itr.nextReverse()
		}

		return
	}

	itr.valid = false
}
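
// Worked example of the forward step (illustrative sketch): with entries k1@1,
// k1@3, k2@2 and itr.version == 2, starting at k1@1:
//
//	1. NextPrefix() moves past all k1 versions to k2@2.
//	2. SeekLT(MVCCEncode(k2, 3)) settles on the largest k2 version <= 2, i.e. k2@2.
//	3. If SeekLT had landed back on the current key, NextPrefix() is retried; if
//	   the landed version is still > 2 or the value is tombstoned, the step
//	   recurses until a visible entry is found or the iterator is exhausted.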
@ -1,67 +0,0 @@
//go:build rocksdb
// +build rocksdb

package rocksdb

import (
	"encoding/binary"

	"github.com/linxGnu/grocksdb"

	"cosmossdk.io/store/v2"
)

var _ store.Batch = (*Batch)(nil)

type Batch struct {
	version  uint64
	ts       [TimestampSize]byte
	storage  *grocksdb.DB
	cfHandle *grocksdb.ColumnFamilyHandle
	batch    *grocksdb.WriteBatch
}

// NewBatch creates a new versioned batch used for batch writes. The caller
// must call Write() on the returned batch to commit the changes and must
// destroy the batch when done.
func NewBatch(db *Database, version uint64) Batch {
	var ts [TimestampSize]byte
	binary.LittleEndian.PutUint64(ts[:], version)

	batch := grocksdb.NewWriteBatch()
	batch.Put([]byte(latestVersionKey), ts[:])

	return Batch{
		version:  version,
		ts:       ts,
		storage:  db.storage,
		cfHandle: db.cfHandle,
		batch:    batch,
	}
}

func (b Batch) Size() int {
	return len(b.batch.Data())
}

func (b Batch) Reset() error {
	b.batch.Clear()
	return nil
}

func (b Batch) Set(storeKey, key, value []byte) error {
	prefixedKey := prependStoreKey(storeKey, key)
	b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value)
	return nil
}

func (b Batch) Delete(storeKey, key []byte) error {
	prefixedKey := prependStoreKey(storeKey, key)
	b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:])
	return nil
}

func (b Batch) Write() error {
	defer b.batch.Destroy()
	return b.storage.Write(defaultWriteOpts, b.batch)
}
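
// Typical usage (illustrative sketch, mirroring the test below): stage versioned
// writes and commit them in one Write call, which also records the latest version:
//
//	batch := NewBatch(db, 1)
//	_ = batch.Set(storeKey, []byte("key001"), []byte("val001"))
//	_ = batch.Delete(storeKey, []byte("key000"))
//	if err := batch.Write(); err != nil {
//		return err
//	}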
@ -1,76 +0,0 @@
//go:build rocksdb
// +build rocksdb

package rocksdb

import (
	"bytes"
	"encoding/binary"

	"github.com/linxGnu/grocksdb"
)

// CreateTSComparator should behave identically to the RocksDB builtin timestamp
// comparator. We also use the same builtin comparator name so the builtin tools
// `ldb`/`sst_dump` can work with the database.
func CreateTSComparator() *grocksdb.Comparator {
	return grocksdb.NewComparatorWithTimestamp(
		"leveldb.BytewiseComparator.u64ts",
		TimestampSize,
		compare,
		compareTS,
		compareWithoutTS,
	)
}

// compareTS compares timestamps as little endian encoded integers.
//
// NOTICE: The behavior must be identical to RocksDB builtin comparator
// "leveldb.BytewiseComparator.u64ts".
func compareTS(bz1, bz2 []byte) int {
	ts1 := binary.LittleEndian.Uint64(bz1)
	ts2 := binary.LittleEndian.Uint64(bz2)

	switch {
	case ts1 < ts2:
		return -1

	case ts1 > ts2:
		return 1

	default:
		return 0
	}
}

// compare compares two internal keys with timestamp suffix, larger timestamp
// comes first.
//
// NOTICE: The behavior must be identical to RocksDB builtin comparator
// "leveldb.BytewiseComparator.u64ts".
func compare(a, b []byte) int {
	ret := compareWithoutTS(a, true, b, true)
	if ret != 0 {
		return ret
	}

	// Compare timestamp. For the same user key with different timestamps, larger
	// (newer) timestamp comes first, which means seek operation will try to find
	// a version less than or equal to the target version.
	return -compareTS(a[len(a)-TimestampSize:], b[len(b)-TimestampSize:])
}

// compareWithoutTS compares two internal keys without the timestamp part.
//
// NOTICE: The behavior must be identical to RocksDB builtin comparator
// "leveldb.BytewiseComparator.u64ts".
func compareWithoutTS(a []byte, aHasTS bool, b []byte, bHasTS bool) int {
	if aHasTS {
		a = a[:len(a)-TimestampSize]
	}
	if bHasTS {
		b = b[:len(b)-TimestampSize]
	}

	return bytes.Compare(a, b)
}
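
// Ordering sketch (illustrative): for the same user key, compare places the
// newer timestamp first, while compareTS on its own is plain ascending. The
// tsOf helper below is hypothetical, written only for this example:
//
//	tsOf := func(v uint64) []byte {
//		bz := make([]byte, TimestampSize)
//		binary.LittleEndian.PutUint64(bz, v)
//		return bz
//	}
//	kOld := append([]byte("key"), tsOf(5)...)
//	kNew := append([]byte("key"), tsOf(9)...)
//	compare(kNew, kOld)         // -1: the version-9 entry sorts before the version-5 entry
//	compareTS(tsOf(5), tsOf(9)) // -1: raw timestamps still compare ascending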
@ -1,251 +0,0 @@
//go:build rocksdb
// +build rocksdb

package rocksdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"slices"

	"github.com/linxGnu/grocksdb"

	corestore "cosmossdk.io/core/store"
	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/errors"
	"cosmossdk.io/store/v2/storage"
	"cosmossdk.io/store/v2/storage/util"
)

const (
	TimestampSize = 8

	StorePrefixTpl   = "s/k:%s/"
	latestVersionKey = "s/latest"
)

var (
	_ storage.Database         = (*Database)(nil)
	_ store.UpgradableDatabase = (*Database)(nil)

	defaultWriteOpts = grocksdb.NewDefaultWriteOptions()
	defaultReadOpts  = grocksdb.NewDefaultReadOptions()
)

type Database struct {
	storage  *grocksdb.DB
	cfHandle *grocksdb.ColumnFamilyHandle

	// tsLow reflects the full_history_ts_low CF value, which is the earliest
	// version supported
	tsLow uint64
}

func New(dataDir string) (*Database, error) {
	storage, cfHandle, err := OpenRocksDB(dataDir)
	if err != nil {
		return nil, fmt.Errorf("failed to open RocksDB: %w", err)
	}

	slice, err := storage.GetFullHistoryTsLow(cfHandle)
	if err != nil {
		return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err)
	}

	var tsLow uint64
	tsLowBz := copyAndFreeSlice(slice)
	if len(tsLowBz) > 0 {
		tsLow = binary.LittleEndian.Uint64(tsLowBz)
	}

	return &Database{
		storage:  storage,
		cfHandle: cfHandle,
		tsLow:    tsLow,
	}, nil
}

func NewWithDB(storage *grocksdb.DB, cfHandle *grocksdb.ColumnFamilyHandle) (*Database, error) {
	slice, err := storage.GetFullHistoryTsLow(cfHandle)
	if err != nil {
		return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err)
	}

	var tsLow uint64
	tsLowBz := copyAndFreeSlice(slice)
	if len(tsLowBz) > 0 {
		tsLow = binary.LittleEndian.Uint64(tsLowBz)
	}

	return &Database{
		storage:  storage,
		cfHandle: cfHandle,
		tsLow:    tsLow,
	}, nil
}

func (db *Database) Close() error {
	db.storage.Close()

	db.storage = nil
	db.cfHandle = nil

	return nil
}

func (db *Database) NewBatch(version uint64) (store.Batch, error) {
	return NewBatch(db, version), nil
}

func (db *Database) getSlice(storeKey []byte, version uint64, key []byte) (*grocksdb.Slice, error) {
	if version < db.tsLow {
		return nil, errors.ErrVersionPruned{EarliestVersion: db.tsLow, RequestedVersion: version}
	}

	return db.storage.GetCF(
		newTSReadOptions(version),
		db.cfHandle,
		prependStoreKey(storeKey, key),
	)
}

func (db *Database) SetLatestVersion(version uint64) error {
	var ts [TimestampSize]byte
	binary.LittleEndian.PutUint64(ts[:], version)

	return db.storage.Put(defaultWriteOpts, []byte(latestVersionKey), ts[:])
}

func (db *Database) GetLatestVersion() (uint64, error) {
	bz, err := db.storage.GetBytes(defaultReadOpts, []byte(latestVersionKey))
	if err != nil {
		return 0, err
	}

	if len(bz) == 0 {
		// in case of a fresh database
		return 0, nil
	}

	return binary.LittleEndian.Uint64(bz), nil
}

func (db *Database) VersionExists(version uint64) (bool, error) {
	latestVersion, err := db.GetLatestVersion()
	if err != nil {
		return false, err
	}

	return latestVersion >= version && version >= db.tsLow, nil
}

func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
	slice, err := db.getSlice(storeKey, version, key)
	if err != nil {
		return false, err
	}

	return slice.Exists(), nil
}

func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
	slice, err := db.getSlice(storeKey, version, key)
	if err != nil {
		return nil, fmt.Errorf("failed to get RocksDB slice: %w", err)
	}

	return copyAndFreeSlice(slice), nil
}

// Prune prunes all versions up to and including the provided version argument.
// Internally, this performs a manual compaction; data with an older timestamp
// will be GCed by the compaction.
func (db *Database) Prune(version uint64) error {
	tsLow := version + 1 // we increment by 1 to include the provided version

	var ts [TimestampSize]byte
	binary.LittleEndian.PutUint64(ts[:], tsLow)
	compactOpts := grocksdb.NewCompactRangeOptions()
	compactOpts.SetFullHistoryTsLow(ts[:])
	db.storage.CompactRangeCFOpt(db.cfHandle, grocksdb.Range{}, compactOpts)

	db.tsLow = tsLow
	return nil
}

func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, errors.ErrKeyEmpty
	}

	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
		return nil, errors.ErrStartAfterEnd
	}

	prefix := storePrefix(storeKey)
	start, end = util.IterateWithPrefix(prefix, start, end)

	itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle)
	return newRocksDBIterator(itr, prefix, start, end, false), nil
}

func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, errors.ErrKeyEmpty
	}

	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
		return nil, errors.ErrStartAfterEnd
	}

	prefix := storePrefix(storeKey)
	start, end = util.IterateWithPrefix(prefix, start, end)

	itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle)
	return newRocksDBIterator(itr, prefix, start, end, true), nil
}

// PruneStoreKeys does nothing for RocksDB; the data will be pruned by compaction
// when the version is pruned.
func (db *Database) PruneStoreKeys(_ []string, _ uint64) error {
	return nil
}

// newTSReadOptions returns ReadOptions used in the RocksDB column family read.
func newTSReadOptions(version uint64) *grocksdb.ReadOptions {
	var ts [TimestampSize]byte
	binary.LittleEndian.PutUint64(ts[:], version)

	readOpts := grocksdb.NewDefaultReadOptions()
	readOpts.SetTimestamp(ts[:])

	return readOpts
}

func storePrefix(storeKey []byte) []byte {
	return []byte(fmt.Sprintf(StorePrefixTpl, storeKey))
}

func prependStoreKey(storeKey, key []byte) []byte {
	return []byte(fmt.Sprintf("%s%s", storePrefix(storeKey), key))
}

// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice does
// not exist, <nil> will be returned.
func copyAndFreeSlice(s *grocksdb.Slice) []byte {
	defer s.Free()
	if !s.Exists() {
		return nil
	}

	return slices.Clone(s.Data())
}

func readOnlySlice(s *grocksdb.Slice) []byte {
	if !s.Exists() {
		return nil
	}

	return s.Data()
}
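
// Read-path sketch (illustrative): every read goes through a timestamped
// ReadOptions, so RocksDB resolves the value as of the requested version:
//
//	val, err := db.Get(storeKey, 42, []byte("key001"))
//	// roughly equivalent to GetCF(newTSReadOptions(42), cfHandle, "s/k:store1/key001")
//	// for storeKey == "store1"; val is nil if the key is absent at version 42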
@ -1,70 +0,0 @@
//go:build !rocksdb
// +build !rocksdb

package rocksdb

import (
	corestore "cosmossdk.io/core/store"
	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/storage"
)

var (
	_ storage.Database         = (*Database)(nil)
	_ store.UpgradableDatabase = (*Database)(nil)
)

type Database struct{}

func New(dataDir string) (*Database, error) {
	return &Database{}, nil
}

func (db *Database) Close() error {
	return nil
}

func (db *Database) NewBatch(version uint64) (store.Batch, error) {
	panic("rocksdb requires a build flag")
}

func (db *Database) SetLatestVersion(version uint64) error {
	panic("rocksdb requires a build flag")
}

func (db *Database) GetLatestVersion() (uint64, error) {
	panic("rocksdb requires a build flag")
}

func (db *Database) VersionExists(version uint64) (bool, error) {
	panic("rocksdb requires a build flag")
}

func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
	panic("rocksdb requires a build flag")
}

func (db *Database) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
	panic("rocksdb requires a build flag")
}

// Prune prunes all versions up to and including the provided version argument.
// Internally, this performs a manual compaction; data with an older timestamp
// will be GCed by the compaction.
func (db *Database) Prune(version uint64) error {
	panic("rocksdb requires a build flag")
}

func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	panic("rocksdb requires a build flag")
}

func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
	panic("rocksdb requires a build flag")
}

// PruneStoreKeys does nothing for RocksDB; the data will be pruned by compaction
// when the version is pruned.
func (db *Database) PruneStoreKeys(_ []string, _ uint64) error {
	return nil
}
@ -1,90 +0,0 @@
//go:build rocksdb
// +build rocksdb

package rocksdb

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	coretesting "cosmossdk.io/core/testing"
	"cosmossdk.io/store/v2/storage"
)

var storeKey1 = []byte("store1")

func TestStorageTestSuite(t *testing.T) {
	s := &storage.StorageTestSuite{
		NewDB: func(dir string) (*storage.StorageStore, error) {
			db, err := New(dir)
			return storage.NewStorageStore(db, coretesting.NewNopLogger()), err
		},
		EmptyBatchSize: 12,
		SkipTests:      []string{"TestUpgradable_Prune"},
	}
	suite.Run(t, s)
}

func TestDatabase_ReverseIterator(t *testing.T) {
	db, err := New(t.TempDir())
	require.NoError(t, err)
	defer db.Close()

	batch := NewBatch(db, 1)
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099

		require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val)))
	}

	require.NoError(t, batch.Write())

	// reverse iterator without an end key
	iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil)
	require.NoError(t, err)

	defer iter.Close()

	i, count := 99, 0
	for ; iter.Valid(); iter.Next() {
		require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key())
		require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value())

		i--
		count++
	}
	require.Equal(t, 100, count)
	require.NoError(t, iter.Error())

	// iterating past the domain should leave the iterator invalid
	require.False(t, iter.Valid())

	// reverse iterator with a start and end domain
	iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019"))
	require.NoError(t, err)

	defer iter2.Close()

	i, count = 18, 0
	for ; iter2.Valid(); iter2.Next() {
		require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key())
		require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value())

		i--
		count++
	}
	require.Equal(t, 9, count)
	require.NoError(t, iter2.Error())

	// iterating past the domain should leave the iterator invalid
	require.False(t, iter2.Valid())

	// start must be <= end
	iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019"))
	require.Error(t, err)
	require.Nil(t, iter3)
}
@ -1,159 +0,0 @@
//go:build rocksdb
// +build rocksdb

package rocksdb

import (
	"bytes"

	"github.com/linxGnu/grocksdb"

	corestore "cosmossdk.io/core/store"
)

var _ corestore.Iterator = (*iterator)(nil)

type iterator struct {
	source             *grocksdb.Iterator
	prefix, start, end []byte
	reverse            bool
	invalid            bool
}

func newRocksDBIterator(source *grocksdb.Iterator, prefix, start, end []byte, reverse bool) *iterator {
	if reverse {
		if end == nil {
			source.SeekToLast()
		} else {
			source.Seek(end)

			if source.Valid() {
				eoaKey := readOnlySlice(source.Key()) // end or after key
				if bytes.Compare(end, eoaKey) <= 0 {
					source.Prev()
				}
			} else {
				source.SeekToLast()
			}
		}
	} else {
		if start == nil {
			source.SeekToFirst()
		} else {
			source.Seek(start)
		}
	}

	return &iterator{
		source:  source,
		prefix:  prefix,
		start:   start,
		end:     end,
		reverse: reverse,
		invalid: !source.Valid(),
	}
}

// Domain returns the domain of the iterator. The caller must not modify the
// return values.
func (itr *iterator) Domain() ([]byte, []byte) {
	start := itr.start
	if start != nil {
		start = start[len(itr.prefix):]
		if len(start) == 0 {
			start = nil
		}
	}

	end := itr.end
	if end != nil {
		end = end[len(itr.prefix):]
		if len(end) == 0 {
			end = nil
		}
	}

	return start, end
}

func (itr *iterator) Valid() bool {
	// once invalid, forever invalid
	if itr.invalid {
		return false
	}

	// if the source has an error, consider it invalid
	if err := itr.source.Err(); err != nil {
		itr.invalid = true
		return false
	}

	// if the source is invalid, consider it invalid
	if !itr.source.Valid() {
		itr.invalid = true
		return false
	}

	// if the key is at the end or past it, consider it invalid
	start := itr.start
	end := itr.end
	key := readOnlySlice(itr.source.Key())

	if itr.reverse {
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.invalid = true
			return false
		}
	} else {
		if end != nil && bytes.Compare(end, key) <= 0 {
			itr.invalid = true
			return false
		}
	}

	return true
}

func (itr *iterator) Key() []byte {
	itr.assertIsValid()
	return copyAndFreeSlice(itr.source.Key())[len(itr.prefix):]
}

func (itr *iterator) Value() []byte {
	itr.assertIsValid()
	return copyAndFreeSlice(itr.source.Value())
}

func (itr *iterator) Timestamp() []byte {
	return itr.source.Timestamp().Data()
}

func (itr *iterator) Next() {
	if itr.invalid {
		return
	}

	if itr.reverse {
		itr.source.Prev()
	} else {
		itr.source.Next()
	}
}

func (itr *iterator) Error() error {
	return itr.source.Err()
}

func (itr *iterator) Close() error {
	itr.source.Close()
	itr.source = nil
	itr.invalid = true

	return nil
}

func (itr *iterator) assertIsValid() {
	if itr.invalid {
		panic("iterator is invalid")
	}
}
@ -1,125 +0,0 @@
|
||||
//go:build rocksdb
|
||||
// +build rocksdb
|
||||
|
||||
package rocksdb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"runtime"
|
||||
|
||||
"github.com/linxGnu/grocksdb"
|
||||
)
|
||||
|
||||
const (
|
||||
// CFNameStateStorage defines the RocksDB column family name for versioned state
|
||||
// storage.
|
||||
CFNameStateStorage = "state_storage"
|
||||
|
||||
// CFNameDefault defines the RocksDB column family name for the default column.
|
||||
CFNameDefault = "default"
|
||||
)
|
||||
|
||||
// NewRocksDBOpts returns the options used for the RocksDB column family for use
|
||||
// in state storage.
|
||||
//
|
||||
// FIXME: We do not enable dict compression for SSTFileWriter, because otherwise
|
||||
// the file writer won't report correct file size.
|
||||
// Ref: https://github.com/facebook/rocksdb/issues/11146
|
||||
func NewRocksDBOpts(sstFileWriter bool) *grocksdb.Options {
|
||||
opts := grocksdb.NewDefaultOptions()
|
||||
opts.SetCreateIfMissing(true)
|
||||
opts.SetComparator(CreateTSComparator())
|
||||
opts.IncreaseParallelism(runtime.NumCPU())
|
||||
opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
|
||||
opts.SetTargetFileSizeMultiplier(2)
|
||||
opts.SetLevelCompactionDynamicLevelBytes(true)
|
||||
|
||||
// block based table options
|
||||
bbto := grocksdb.NewDefaultBlockBasedTableOptions()
|
||||
|
||||
// 1G block cache
|
||||
bbto.SetBlockSize(32 * 1024)
|
||||
bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30))
|
||||
|
||||
bbto.SetFilterPolicy(grocksdb.NewRibbonHybridFilterPolicy(9.9, 1))
|
||||
bbto.SetIndexType(grocksdb.KBinarySearchWithFirstKey)
|
||||
bbto.SetOptimizeFiltersForMemory(true)
|
||||
opts.SetBlockBasedTableFactory(bbto)
|
||||
|
||||
// Improve sst file creation speed: compaction or sst file writer.
|
||||
opts.SetCompressionOptionsParallelThreads(4)
|
||||
|
||||
if !sstFileWriter {
|
||||
// compression options at bottommost level
|
||||
opts.SetBottommostCompression(grocksdb.ZSTDCompression)
|
||||
|
||||
compressOpts := grocksdb.NewDefaultCompressionOptions()
|
||||
compressOpts.MaxDictBytes = 112640 // 110k
|
||||
compressOpts.Level = 12
|
||||
|
||||
opts.SetBottommostCompressionOptions(compressOpts, true)
|
||||
opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(compressOpts.MaxDictBytes*100, true)
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// OpenRocksDB opens a RocksDB database connection for versioned reading and writing.
|
||||
// It also returns a column family handle for versioning using user-defined timestamps.
|
||||
// The default column family is used for metadata, specifically key/value pairs
|
||||
// that are stored on another column family named with "state_storage", which has
|
||||
// user-defined timestamp enabled.
|
||||
func OpenRocksDB(dataDir string) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) {
|
||||
opts := grocksdb.NewDefaultOptions()
|
||||
opts.SetCreateIfMissing(true)
|
||||
opts.SetCreateIfMissingColumnFamilies(true)
|
||||
|
||||
db, cfHandles, err := grocksdb.OpenDbColumnFamilies(
|
||||
opts,
|
||||
dataDir,
|
||||
[]string{
|
||||
CFNameDefault,
|
||||
CFNameStateStorage,
|
||||
},
|
||||
[]*grocksdb.Options{
|
||||
opts,
|
||||
NewRocksDBOpts(false),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return db, cfHandles[1], nil
|
||||
}

// OpenRocksDBAndTrimHistory opens a RocksDB handle like OpenRocksDB, but it also
// trims any versions newer than the target one, so that it can be used for rollback.
func OpenRocksDBAndTrimHistory(dataDir string, version int64) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) {
    var ts [TimestampSize]byte
    binary.LittleEndian.PutUint64(ts[:], uint64(version))

    opts := grocksdb.NewDefaultOptions()
    opts.SetCreateIfMissing(true)
    opts.SetCreateIfMissingColumnFamilies(true)

    db, cfHandles, err := grocksdb.OpenDbAndTrimHistory(
        opts,
        dataDir,
        []string{
            CFNameDefault,
            CFNameStateStorage,
        },
        []*grocksdb.Options{
            opts,
            NewRocksDBOpts(false),
        },
        ts[:],
    )
    if err != nil {
        return nil, nil, err
    }

    return db, cfHandles[1], nil
}
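
// Illustrative only (not part of the diff): rolling a store back to version 100
// by trimming everything newer, then continuing with the returned handles as usual.
func exampleRollback(dataDir string) error {
    db, cfHandle, err := OpenRocksDBAndTrimHistory(dataDir, 100)
    if err != nil {
        return err
    }
    defer db.Close()

    _ = cfHandle // reads at versions <= 100 behave as if nothing newer was ever written
    return nil
}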
@ -1,182 +0,0 @@
//go:build rocksdb
// +build rocksdb

package storage_test

import (
    "bytes"
    "fmt"
    "math/rand"
    "sort"
    "testing"

    "github.com/stretchr/testify/require"

    corestore "cosmossdk.io/core/store"
    coretesting "cosmossdk.io/core/testing"
    "cosmossdk.io/store/v2"
    "cosmossdk.io/store/v2/storage"
    "cosmossdk.io/store/v2/storage/pebbledb"
    "cosmossdk.io/store/v2/storage/rocksdb"
)

var storeKey1 = []byte("store1")

var (
    backends = map[string]func(dataDir string) (store.VersionedWriter, error){
        "rocksdb_versiondb_opts": func(dataDir string) (store.VersionedWriter, error) {
            db, err := rocksdb.New(dataDir)
            return storage.NewStorageStore(db, coretesting.NewNopLogger()), err
        },
        "pebbledb_default_opts": func(dataDir string) (store.VersionedWriter, error) {
            db, err := pebbledb.New(dataDir)
            if err == nil && db != nil {
                db.SetSync(false)
            }

            return storage.NewStorageStore(db, coretesting.NewNopLogger()), err
        },
    }
    rng = rand.New(rand.NewSource(567320))
)

func BenchmarkGet(b *testing.B) {
    numKeyVals := 1_000_000
    keys := make([][]byte, numKeyVals)
    vals := make([][]byte, numKeyVals)
    for i := 0; i < numKeyVals; i++ {
        key := make([]byte, 128)
        val := make([]byte, 128)

        _, err := rng.Read(key)
        require.NoError(b, err)
        _, err = rng.Read(val)
        require.NoError(b, err)

        keys[i] = key
        vals[i] = val
    }

    for ty, fn := range backends {
        db, err := fn(b.TempDir())
        require.NoError(b, err)
        defer func() {
            _ = db.Close()
        }()

        cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}})
        for i := 0; i < numKeyVals; i++ {
            cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]})
        }

        require.NoError(b, db.ApplyChangeset(cs))

        b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) {
            b.ResetTimer()

            for i := 0; i < b.N; i++ {
                b.StopTimer()
                key := keys[rng.Intn(len(keys))]

                b.StartTimer()
                _, err = db.Get(storeKey1, 1, key)
                require.NoError(b, err)
            }
        })
    }
}

func BenchmarkApplyChangeset(b *testing.B) {
    for ty, fn := range backends {
        db, err := fn(b.TempDir())
        require.NoError(b, err)
        defer func() {
            _ = db.Close()
        }()

        b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) {
            b.ResetTimer()

            for i := 0; i < b.N; i++ {
                b.StopTimer()

                // each iteration writes a fresh, strictly increasing version
                ver := uint64(i + 1)
                cs := corestore.NewChangesetWithPairs(ver, map[string]corestore.KVPairs{string(storeKey1): {}})
                for j := 0; j < 1000; j++ {
                    key := make([]byte, 128)
                    val := make([]byte, 128)

                    _, err = rng.Read(key)
                    require.NoError(b, err)
                    _, err = rng.Read(val)
                    require.NoError(b, err)

                    cs.AddKVPair(storeKey1, corestore.KVPair{Key: key, Value: val})
                }

                b.StartTimer()
                require.NoError(b, db.ApplyChangeset(cs))
            }
        })
    }
}

func BenchmarkIterate(b *testing.B) {
    numKeyVals := 1_000_000
    keys := make([][]byte, numKeyVals)
    vals := make([][]byte, numKeyVals)
    for i := 0; i < numKeyVals; i++ {
        key := make([]byte, 128)
        val := make([]byte, 128)

        _, err := rng.Read(key)
        require.NoError(b, err)
        _, err = rng.Read(val)
        require.NoError(b, err)

        keys[i] = key
        vals[i] = val
    }

    for ty, fn := range backends {
        db, err := fn(b.TempDir())
        require.NoError(b, err)
        defer func() {
            _ = db.Close()
        }()

        b.StopTimer()

        cs := corestore.NewChangesetWithPairs(1, map[string]corestore.KVPairs{string(storeKey1): {}})
        for i := 0; i < numKeyVals; i++ {
            cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]})
        }

        require.NoError(b, db.ApplyChangeset(cs))

        sort.Slice(keys, func(i, j int) bool {
            return bytes.Compare(keys[i], keys[j]) < 0
        })

        b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) {
            b.ResetTimer()

            for i := 0; i < b.N; i++ {
                b.StopTimer()

                itr, err := db.Iterator(storeKey1, 1, keys[0], nil)
                require.NoError(b, err)

                b.StartTimer()

                for ; itr.Valid(); itr.Next() {
                    _ = itr.Key()
                    _ = itr.Value()
                }

                require.NoError(b, itr.Error())
            }
        })
    }
}
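
(Not part of the diff: these benchmarks sit behind the rocksdb build tag, so running
them required passing the tag explicitly; the package path below is assumed from the
imports above.)

    go test -tags rocksdb -run=^$ -bench=. ./store/v2/storage/...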
File diff suppressed because it is too large
@ -1,162 +0,0 @@
package storage

import (
    "errors"
    "fmt"

    "cosmossdk.io/core/log"
    corestore "cosmossdk.io/core/store"
    "cosmossdk.io/store/v2"
    "cosmossdk.io/store/v2/snapshots"
)

const (
    // TODO: this is an arbitrary value, it needs to be tuned
    defaultBatchBufferSize = 100000
)

var (
    _ store.VersionedWriter        = (*StorageStore)(nil)
    _ snapshots.StorageSnapshotter = (*StorageStore)(nil)
    _ store.Pruner                 = (*StorageStore)(nil)
    _ store.UpgradableDatabase     = (*StorageStore)(nil)
)

// StorageStore is a wrapper around the store.VersionedWriter interface.
type StorageStore struct {
    logger log.Logger
    db     Database
}

// NewStorageStore returns a reference to a new StorageStore.
func NewStorageStore(db Database, logger log.Logger) *StorageStore {
    return &StorageStore{
        logger: logger,
        db:     db,
    }
}

// Has returns true if the key exists in the store.
func (ss *StorageStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) {
    return ss.db.Has(storeKey, version, key)
}

// Get returns the value associated with the given key.
func (ss *StorageStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
    return ss.db.Get(storeKey, version, key)
}

// ApplyChangeset applies the given changeset to the storage.
func (ss *StorageStore) ApplyChangeset(cs *corestore.Changeset) error {
    b, err := ss.db.NewBatch(cs.Version)
    if err != nil {
        return err
    }

    for _, pairs := range cs.Changes {
        for _, kvPair := range pairs.StateChanges {
            if kvPair.Remove {
                if err := b.Delete(pairs.Actor, kvPair.Key); err != nil {
                    return err
                }
            } else {
                if err := b.Set(pairs.Actor, kvPair.Key, kvPair.Value); err != nil {
                    return err
                }
            }
        }
    }

    if err := b.Write(); err != nil {
        return err
    }

    return nil
}

// GetLatestVersion returns the latest version of the store.
func (ss *StorageStore) GetLatestVersion() (uint64, error) {
    return ss.db.GetLatestVersion()
}

// SetLatestVersion sets the latest version of the store.
func (ss *StorageStore) SetLatestVersion(version uint64) error {
    return ss.db.SetLatestVersion(version)
}

// VersionExists returns true if the given version exists in the store.
func (ss *StorageStore) VersionExists(version uint64) (bool, error) {
    return ss.db.VersionExists(version)
}

// Iterator returns an iterator over the specified domain and prefix.
func (ss *StorageStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
    return ss.db.Iterator(storeKey, version, start, end)
}

// ReverseIterator returns an iterator over the specified domain and prefix in reverse.
func (ss *StorageStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
    return ss.db.ReverseIterator(storeKey, version, start, end)
}

// Prune prunes the store up to the given version.
func (ss *StorageStore) Prune(version uint64) error {
    return ss.db.Prune(version)
}

// Restore restores the store from the given channel, writing in batches and
// flushing whenever a batch exceeds defaultBatchBufferSize.
func (ss *StorageStore) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error {
    latestVersion, err := ss.db.GetLatestVersion()
    if err != nil {
        return fmt.Errorf("failed to get latest version: %w", err)
    }
    if version <= latestVersion {
        return fmt.Errorf("the snapshot version %d is not greater than latest version %d", version, latestVersion)
    }

    b, err := ss.db.NewBatch(version)
    if err != nil {
        return err
    }

    for kvPair := range chStorage {
        for _, kv := range kvPair.StateChanges {
            if err := b.Set(kvPair.Actor, kv.Key, kv.Value); err != nil {
                return err
            }
            if b.Size() > defaultBatchBufferSize {
                if err := b.Write(); err != nil {
                    return err
                }
                b, err = ss.db.NewBatch(version)
                if err != nil {
                    return err
                }
            }
        }
    }

    if b.Size() > 0 {
        if err := b.Write(); err != nil {
            return err
        }
    }

    return nil
}
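
// Illustrative only (not part of the diff): a minimal sketch of driving Restore
// from a channel of state changes, e.g. during snapshot import. The actor, key,
// and value payloads are hypothetical.
func exampleRestore(ss *StorageStore) error {
    ch := make(chan *corestore.StateChanges, 1)
    go func() {
        defer close(ch) // Restore returns once the channel is drained and closed
        ch <- &corestore.StateChanges{
            Actor: []byte("store1"),
            StateChanges: corestore.KVPairs{
                {Key: []byte("key"), Value: []byte("value")},
            },
        }
    }()

    // the restore version (here 2) must be greater than the store's latest version
    return ss.Restore(2, ch)
}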

// PruneStoreKeys prunes the given store keys; the underlying database must
// implement the store.UpgradableDatabase interface.
func (ss *StorageStore) PruneStoreKeys(storeKeys []string, version uint64) error {
    gdb, ok := ss.db.(store.UpgradableDatabase)
    if !ok {
        return errors.New("db does not implement UpgradableDatabase interface")
    }

    return gdb.PruneStoreKeys(storeKeys, version)
}

// Close closes the store.
func (ss *StorageStore) Close() error {
    return ss.db.Close()
}
@ -1,53 +0,0 @@
package util

// IterateWithPrefix returns the begin and end keys for an iterator over a domain
// and prefix.
func IterateWithPrefix(prefix, begin, end []byte) ([]byte, []byte) {
    if len(prefix) == 0 {
        return begin, end
    }

    begin = cloneAppend(prefix, begin)

    if end == nil {
        end = CopyIncr(prefix)
    } else {
        end = cloneAppend(prefix, end)
    }

    return begin, end
}

func cloneAppend(front, tail []byte) (res []byte) {
    res = make([]byte, len(front)+len(tail))

    n := copy(res, front)
    copy(res[n:], tail)

    return res
}

// CopyIncr returns a copy of bz incremented by one in lexicographic order, i.e.
// the smallest byte slice strictly greater than every slice prefixed by bz.
// It returns nil if every byte is 0xFF (overflow).
func CopyIncr(bz []byte) []byte {
    if len(bz) == 0 {
        panic("CopyIncr expects non-zero bz length")
    }

    ret := make([]byte, len(bz))
    copy(ret, bz)

    for i := len(bz) - 1; i >= 0; i-- {
        if ret[i] < byte(0xFF) {
            ret[i]++
            return ret
        }

        ret[i] = byte(0x00)

        if i == 0 {
            // overflow, e.g. 0xFFFF...: there is no finite upper bound
            return nil
        }
    }

    return nil
}
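
// Illustrative only (not part of the diff): a worked example of the helpers above.
// Appending the prefix and incrementing it yields the half-open range covering
// exactly the keys under that prefix:
//
//	begin, end := IterateWithPrefix([]byte("s/"), nil, nil)
//	// begin = "s/" (prefix + nil lower bound)
//	// end   = "s0" (CopyIncr bumps the last byte: '/'+1 == '0')
//	// iterating [begin, end) visits every key prefixed with "s/"
//
// CopyIncr returns nil when every byte is 0xFF, signalling an unbounded upper end.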
@ -61,9 +61,6 @@ type RootStore interface {

// Backend defines the interface for the RootStore backends.
type Backend interface {
    // GetStateStorage returns the SS backend.
    GetStateStorage() VersionedWriter

    // GetStateCommitment returns the SC backend.
    GetStateCommitment() Committer
}
@ -51,12 +51,14 @@ func TestBaseAccount(t *testing.T) {
}

func sendTx(t *testing.T, ctx sdk.Context, app *simapp.SimApp, sender []byte, msg sdk.Msg) {
    t.Helper()
    tx := sign(t, ctx, app, sender, privKey, msg)
    _, _, err := app.SimDeliver(app.TxEncode, tx)
    require.NoError(t, err)
}

func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress, privKey cryptotypes.PrivKey, msg sdk.Msg) sdk.Tx {
    t.Helper()
    r := rand.New(rand.NewSource(0))

    accNum, err := app.AccountsKeeper.AccountByNumber.Get(ctx, from)
@ -81,12 +83,14 @@ func sign(t *testing.T, ctx sdk.Context, app *simapp.SimApp, from sdk.AccAddress
}

func bechify(t *testing.T, app *simapp.SimApp, addr []byte) string {
    t.Helper()
    bech32, err := app.AuthKeeper.AddressCodec().BytesToString(addr)
    require.NoError(t, err)
    return bech32
}

func fundAccount(t *testing.T, app *simapp.SimApp, ctx sdk.Context, addr sdk.AccAddress, amt string) {
    t.Helper()
    require.NoError(t, testutil.FundAccount(ctx, app.BankKeeper, addr, coins(t, amt)))
}
@ -209,6 +209,7 @@ func TestMsgServer_ExecuteBundle(t *testing.T) {
}

func makeTx(t *testing.T, msg gogoproto.Message, sig []byte, xt *account_abstractionv1.TxExtension) []byte {
    t.Helper()
    anyMsg, err := codectypes.NewAnyWithValue(msg)
    require.NoError(t, err)
@ -22,7 +22,6 @@ import (
    _ "cosmossdk.io/x/consensus" // import as blank for app wiring
    _ "cosmossdk.io/x/staking"   // import as blank for app wiring

    "github.com/cosmos/cosmos-sdk/codec"
    "github.com/cosmos/cosmos-sdk/tests/integration/v2"
    "github.com/cosmos/cosmos-sdk/testutil/configurator"
    _ "github.com/cosmos/cosmos-sdk/x/auth" // import as blank for app wiring
@ -35,7 +34,6 @@ import (
type suite struct {
    app *integration.App

    cdc codec.Codec
    ctx context.Context

    authKeeper authkeeper.AccountKeeper