feat(store/v2): Merge Feature Branch (#18150)

Co-authored-by: cool-developer <51834436+cool-develope@users.noreply.github.com>
Co-authored-by: yihuang <huang@crypto.com>
This commit is contained in:
Aleksandr Bezobchuk 2023-10-18 14:03:43 -04:00 committed by GitHub
parent 7b90fc0664
commit 03bca7b791
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
181 changed files with 6211 additions and 16235 deletions

View File

@ -14,6 +14,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: actions/setup-go@v4
with:
go-version: "1.21"
@ -29,7 +31,7 @@ jobs:
if: env.GIT_DIFF
id: lint_long
run: |
make lint
nix develop -c make lint
- uses: technote-space/get-diff-action@v6.1.2
if: steps.lint_long.outcome == 'skipped'
id: git_diff_all
@ -43,7 +45,7 @@ jobs:
- name: run linting (short)
if: steps.lint_long.outcome == 'skipped' && env.GIT_DIFF
run: |
make lint
nix develop -c make lint
env:
GIT_DIFF: ${{ env.GIT_DIFF }}
LINT_DIFF: 1

View File

@ -602,6 +602,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: actions/setup-go@v4
with:
go-version: "1.20"
@ -619,7 +621,7 @@ jobs:
if: env.GIT_DIFF
run: |
cd store
go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb_build' ./...
nix develop .. -c go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb' ./...
- name: sonarcloud
if: ${{ env.GIT_DIFF && !github.event.pull_request.draft && env.SONAR_TOKEN != null }}
uses: SonarSource/sonarcloud-github-action@master
@ -790,9 +792,9 @@ jobs:
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
with:
projectBaseDir: x/circuit/
test-x-protocolpool:
runs-on: ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v4
@ -820,7 +822,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
with:
projectBaseDir: x/protocolpool/
projectBaseDir: x/protocolpool/
test-x-feegrant:
runs-on: ubuntu-latest

View File

@ -11,6 +11,9 @@ run:
- ".*\\.pb\\.gw\\.go$"
- ".*\\.pulsar\\.go$"
build-tags:
- rocksdb
linters:
disable-all: true
enable:

View File

@ -99,7 +99,7 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/linxGnu/grocksdb v1.8.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect

View File

@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo=
github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=

View File

@ -30,7 +30,7 @@ require (
github.com/klauspost/compress v1.16.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/linxGnu/grocksdb v1.7.16 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/onsi/gomega v1.20.0 // indirect
github.com/pkg/errors v0.9.1 // indirect

View File

@ -77,8 +77,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8=
github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=

View File

@ -34,7 +34,7 @@ require (
github.com/klauspost/compress v1.16.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/linxGnu/grocksdb v1.7.16 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect

View File

@ -75,8 +75,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8=
github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=

View File

@ -11,8 +11,17 @@
outputs = { self, nixpkgs, gomod2nix, flake-utils }:
{
overlays.default = pkgs: _: {
simd = pkgs.callPackage ./simapp { rev = self.shortRev or "dev"; };
overlays.default = self: super: {
simd = self.callPackage ./simapp { rev = self.shortRev or "dev"; };
rocksdb = super.rocksdb.overrideAttrs (_: rec {
version = "8.5.3";
src = self.fetchFromGitHub {
owner = "facebook";
repo = "rocksdb";
rev = "v${version}";
sha256 = "sha256-Qa4bAprXptA79ilNE5KSfggEDvNFHdrvDQ6SvzWMQus=";
};
});
};
} //
(flake-utils.lib.eachDefaultSystem

2
go.mod
View File

@ -123,7 +123,7 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/linxGnu/grocksdb v1.8.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/highwayhash v1.0.2 // indirect

4
go.sum
View File

@ -534,8 +534,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo=
github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=

View File

@ -46,7 +46,7 @@ require (
github.com/klauspost/compress v1.16.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/linxGnu/grocksdb v1.7.16 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/onsi/gomega v1.20.0 // indirect
github.com/pkg/errors v0.9.1 // indirect

View File

@ -96,8 +96,8 @@ github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8=
github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=

View File

@ -130,7 +130,7 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/linxGnu/grocksdb v1.8.0 // indirect
github.com/linxGnu/grocksdb v1.8.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect

View File

@ -749,8 +749,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE=
github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg=
github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo=
github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=

View File

@ -327,8 +327,8 @@ schema = 3
version = "v0.1.0"
hash = "sha256-wQqGTtRWsfR9n0O/SXHVgECebbnNmHddxJIbG63OJBQ="
[mod."github.com/linxGnu/grocksdb"]
version = "v1.8.0"
hash = "sha256-o6zj18at4oN6pqSioFqd1AXduR/cx0xDgtL1rPPw+1M="
version = "v1.8.4"
hash = "sha256-AGQ0YEuwUaev/mxOkiTNgAOLB9c7reHyTmNxXuaRah8="
[mod."github.com/magiconair/properties"]
version = "v1.8.7"
hash = "sha256-XQ2bnc2s7/IH3WxEO4GishZurMyKwEclZy1DXg+2xXc="

17
store/batch.go Normal file
View File

@ -0,0 +1,17 @@
package store

// Batch is a write-only database that accumulates writes in memory and commits
// them to the underlying database only when Write is called. A batch cannot be
// used concurrently.
type Batch interface {
	// Writer (embedded) supplies the mutating operations queued by the batch.
	Writer

	// Size retrieves the amount of data queued up for writing, this includes
	// the keys, values, and deleted keys.
	Size() int

	// Write flushes any accumulated data to disk.
	Write() error

	// Reset resets the batch so it can be reused.
	Reset()
}

10
store/branchkv/README.md Normal file
View File

@ -0,0 +1,10 @@
# branchkv
The `branchkv.Store` implementation defines a `BranchedKVStore` that contains a
reference to a `VersionedDatabase`, i.e. an SS backend. The `branchkv.Store` is
meant to be used as the primary store used in a `RootStore` implementation. It
provides the ability to get the current `ChangeSet`, branching, and writing to
a parent store (if one is defined). Note, all reads first pass through the
staged, i.e. dirty writes. If a key is not found in the staged writes, the read
is then passed to the parent store (if one is defined), finally falling back to
the backing SS engine.

141
store/branchkv/iterator.go Normal file
View File

@ -0,0 +1,141 @@
package branchkv
import (
"slices"
"cosmossdk.io/store/v2"
)
// Compile-time assertion that iterator satisfies the store.Iterator interface.
var _ store.Iterator = (*iterator)(nil)

// iterator walks over both the KVStore's changeset, i.e. dirty writes, and the
// parent iterator, which can either be another KVStore or the SS backend, at the
// same time.
//
// Note, writes that happen on the KVStore over an iterator will not affect the
// iterator. This is because when an iterator is created, it takes a current
// snapshot of the changeset.
type iterator struct {
	parentItr store.Iterator // iterator over the parent store or the SS backend
	start     []byte         // lower bound of the iteration domain (inclusive)
	end       []byte         // upper bound of the iteration domain (exclusive)
	key       []byte         // current key; nil when the iterator is invalid
	value     []byte         // current value; nil when the iterator is invalid
	keys      []string       // remaining snapshotted dirty-write keys, in iteration order
	values    []store.KVPair // values parallel to keys; a nil Value marks a delete
	reverse   bool           // true when iterating in descending key order
	exhausted bool           // exhausted reflects if the parent iterator is exhausted or not
}
// Domain returns the iteration bounds [start, end) this iterator was created
// with. The caller must not modify the returned slices.
func (itr *iterator) Domain() (start, end []byte) {
	start, end = itr.start, itr.end
	return start, end
}
// Key returns a defensive copy of the current key (nil when invalid).
func (itr *iterator) Key() []byte {
	return append([]byte(nil), itr.key...)
}
// Value returns a defensive copy of the current value (nil when invalid).
func (itr *iterator) Value() []byte {
	return append([]byte(nil), itr.value...)
}
// Close releases the iterator's snapshot and closes the parent iterator.
// The iterator must not be used after Close.
func (itr *iterator) Close() {
	itr.key, itr.value = nil, nil
	itr.keys, itr.values = nil, nil
	itr.parentItr.Close()
}
// Next advances the iterator to the next visible entry, merging the snapshotted
// dirty writes with the parent iterator in key order (descending when reverse).
// Dirty entries whose Value is nil record deletes: they are skipped, and they
// shadow an equal parent key. Next returns false once both sources are drained.
func (itr *iterator) Next() bool {
	for {
		switch {
		case itr.exhausted && len(itr.keys) == 0: // exhausted both
			itr.key = nil
			itr.value = nil
			return false

		case itr.exhausted: // exhausted parent iterator but not store (dirty writes) iterator
			nextKey := itr.keys[0]
			nextValue := itr.values[0]

			// pop off the key; zero the slot so the backing array releases it
			itr.keys[0] = ""
			itr.keys = itr.keys[1:]

			// pop off the value; zero the slot so the backing array releases it
			itr.values[0].Value = nil
			itr.values = itr.values[1:]

			// a nil value is a pending delete — loop again to skip it
			if nextValue.Value != nil {
				itr.key = []byte(nextKey)
				itr.value = nextValue.Value
				return true
			}

		case len(itr.keys) == 0: // exhausted store (dirty writes) iterator but not parent iterator
			itr.key = itr.parentItr.Key()
			itr.value = itr.parentItr.Value()
			itr.exhausted = !itr.parentItr.Next()
			return true

		default: // parent iterator is not exhausted and we have store (dirty writes) remaining
			dirtyKey := itr.keys[0]
			dirtyVal := itr.values[0]

			parentKey := itr.parentItr.Key()
			parentKeyStr := string(parentKey)

			switch {
			case (!itr.reverse && dirtyKey < parentKeyStr) || (itr.reverse && dirtyKey > parentKeyStr): // dirty key should come before parent's key
				// pop off key
				itr.keys[0] = ""
				itr.keys = itr.keys[1:]

				// pop off value
				itr.values[0].Value = nil
				itr.values = itr.values[1:]

				// skip deletes (nil value) by looping again
				if dirtyVal.Value != nil {
					itr.key = []byte(dirtyKey)
					itr.value = dirtyVal.Value
					return true
				}

			case (!itr.reverse && parentKeyStr < dirtyKey) || (itr.reverse && parentKeyStr > dirtyKey): // parent's key should come before dirty key
				itr.key = parentKey
				itr.value = itr.parentItr.Value()
				itr.exhausted = !itr.parentItr.Next()
				return true

			default:
				// keys are equal: the dirty write shadows the parent entry, so
				// consume both sides

				// pop off key
				itr.keys[0] = ""
				itr.keys = itr.keys[1:]

				// pop off value
				itr.values[0].Value = nil
				itr.values = itr.values[1:]

				itr.exhausted = !itr.parentItr.Next()

				// a nil value means the key was deleted in the changeset; skip it
				if dirtyVal.Value != nil {
					itr.key = []byte(dirtyKey)
					itr.value = dirtyVal.Value
					return true
				}
			}
		}
	}
}
// Valid reports whether the iterator currently points at an entry.
func (itr *iterator) Valid() bool {
	if itr.key == nil {
		return false
	}
	return itr.value != nil
}
// Error surfaces any error accumulated by the parent iterator; the merge
// logic itself produces no errors of its own.
func (itr *iterator) Error() error {
	err := itr.parentItr.Error()
	return err
}

312
store/branchkv/store.go Normal file
View File

@ -0,0 +1,312 @@
package branchkv
import (
"io"
"slices"
"sync"
"golang.org/x/exp/maps"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/tracekv"
)
// Compile-time assertion that Store satisfies store.BranchedKVStore.
var _ store.BranchedKVStore = (*Store)(nil)

// Store implements both a KVStore and BranchedKVStore interfaces. It is used to
// accumulate writes that can be later committed to backing SS and SC engines or
// discarded altogether. If a read is not found through an uncommitted write, it
// will be delegated to the SS backend.
type Store struct {
	// mu guards all mutable state below
	mu sync.Mutex

	// storage reflects backing storage (SS) for reads that are not found in
	// uncommitted volatile state; nil for stores created via NewWithParent
	storage store.VersionedDatabase

	// version indicates the latest version to handle reads falling through to SS
	version uint64

	// storeKey reflects the store key used for the store
	storeKey string

	// parent reflects a parent store if branched (it may be nil)
	parent store.KVStore

	// changeset reflects the uncommitted writes to the store, keyed by the raw
	// key bytes; a KVPair with a nil Value records a pending delete
	changeset map[string]store.KVPair
}
// New returns a BranchedKVStore backed by the provided SS engine, pinned to
// the engine's latest version for fallthrough reads. It returns an error if
// the latest version cannot be determined.
func New(storeKey string, ss store.VersionedDatabase) (store.BranchedKVStore, error) {
	version, err := ss.GetLatestVersion()
	if err != nil {
		return nil, err
	}

	s := &Store{
		storage:   ss,
		storeKey:  storeKey,
		version:   version,
		changeset: make(map[string]store.KVPair),
	}

	return s, nil
}
// NewWithParent returns a BranchedKVStore layered on top of the given parent
// store; reads that miss the changeset are delegated to the parent.
//
// NOTE(review): the returned store carries no SS backend (storage is nil) and
// version 0, so all reads must be satisfiable by the changeset or the parent
// chain — confirm callers never invoke storage-dependent methods (e.g. Reset)
// on stores built this way.
func NewWithParent(parent store.KVStore) store.BranchedKVStore {
	return &Store{
		parent:    parent,
		storeKey:  parent.GetStoreKey(),
		changeset: make(map[string]store.KVPair),
	}
}
// GetStoreKey returns the store key this store was created with.
func (s *Store) GetStoreKey() string { return s.storeKey }
// GetStoreType always reports the branch store type.
func (s *Store) GetStoreType() store.StoreType { return store.StoreTypeBranch }
// GetChangeset returns a deep copy of the uncommitted writes to the store,
// ordered by key. Values are cloned so the caller cannot mutate the store's
// internal state.
func (s *Store) GetChangeset() *store.Changeset {
	// Every other accessor serializes access to s.changeset via s.mu; reading
	// and iterating the map here without the lock races with Set/Delete.
	s.mu.Lock()
	defer s.mu.Unlock()

	keys := maps.Keys(s.changeset)
	slices.Sort(keys)

	pairs := make([]store.KVPair, len(keys))
	for i, key := range keys {
		kvPair := s.changeset[key]
		pairs[i] = store.KVPair{
			Key:      []byte(key),
			Value:    slices.Clone(kvPair.Value),
			StoreKey: kvPair.StoreKey,
		}
	}

	return store.NewChangeset(pairs...)
}
// Reset discards all uncommitted writes and re-pins the store to the latest
// version of the SS backend. It returns an error if the latest version cannot
// be determined.
//
// For a store created via NewWithParent there is no SS backend; in that case
// only the changeset is cleared. (Previously this dereferenced a nil storage
// and panicked.)
func (s *Store) Reset() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Branched stores (NewWithParent) carry no SS backend, so there is no
	// version to refresh — just drop the dirty writes.
	if s.storage == nil {
		clear(s.changeset)
		return nil
	}

	latestVersion, err := s.storage.GetLatestVersion()
	if err != nil {
		return err
	}

	clear(s.changeset)
	s.version = latestVersion

	return nil
}
// Branch creates a child store whose reads fall through to this store and
// whose writes stay isolated until Write is called on the child.
func (s *Store) Branch() store.BranchedKVStore {
	child := NewWithParent(s)
	return child
}
// BranchWithTrace creates a child store like Branch, but wraps this store in a
// tracing KVStore so the child's flushed operations are recorded to w.
func (s *Store) BranchWithTrace(w io.Writer, tc store.TraceContext) store.BranchedKVStore {
	traced := tracekv.New(s, w, tc)
	return NewWithParent(traced)
}
// Has reports whether key exists, consulting dirty writes first, then the
// parent store (if branched), and finally the SS backend. It panics if the
// SS backend read fails.
func (s *Store) Has(key []byte) bool {
	store.AssertValidKey(key)

	s.mu.Lock()
	defer s.mu.Unlock()

	// dirty writes take precedence; a nil value records a pending delete
	if kvPair, dirty := s.changeset[string(key)]; dirty {
		return kvPair.Value != nil
	}

	// branched stores delegate misses to their parent
	if s.parent != nil {
		return s.parent.Has(key)
	}

	// fall back to the SS engine at the pinned version
	ok, err := s.storage.Has(s.storeKey, s.version, key)
	if err != nil {
		panic(err)
	}

	return ok
}
// Get returns the value for key, or nil if the key is absent or deleted in the
// changeset. Lookup order is dirty writes, then parent (if branched), then the
// SS backend. It panics if the SS backend read fails.
func (s *Store) Get(key []byte) []byte {
	store.AssertValidKey(key)

	s.mu.Lock()
	defer s.mu.Unlock()

	// dirty writes take precedence; a nil value records a pending delete
	if kvPair, dirty := s.changeset[string(key)]; dirty {
		if kvPair.Value == nil {
			return nil
		}
		return slices.Clone(kvPair.Value)
	}

	// branched stores delegate misses to their parent
	if s.parent != nil {
		return s.parent.Get(key)
	}

	// fall back to the SS engine at the pinned version
	bz, err := s.storage.Get(s.storeKey, s.version, key)
	if err != nil {
		panic(err)
	}

	return bz
}
// Set stages a write of value under key. The value is copied so later caller
// mutations do not leak into the changeset.
func (s *Store) Set(key, value []byte) {
	store.AssertValidKey(key)
	store.AssertValidValue(value)

	s.mu.Lock()
	defer s.mu.Unlock()

	// the map key carries the key bytes, so KVPair.Key is left unset
	pair := store.KVPair{StoreKey: s.storeKey, Value: append([]byte(nil), value...)}
	s.changeset[string(key)] = pair
}
// Delete stages a delete of key by recording a nil-valued entry in the
// changeset, which shadows any parent or SS value.
func (s *Store) Delete(key []byte) {
	store.AssertValidKey(key)

	s.mu.Lock()
	defer s.mu.Unlock()

	// a nil Value marks the key as deleted; the map key carries the key bytes
	s.changeset[string(key)] = store.KVPair{StoreKey: s.storeKey}
}
// Write flushes the accumulated dirty writes to the parent store, if one
// exists, in ascending key order. Persistence to the SS backend is not done
// here; that happens in Commit().
func (s *Store) Write() {
	s.mu.Lock()
	defer s.mu.Unlock()

	// without a parent there is nowhere to flush to
	if s.parent == nil {
		return
	}

	sortedKeys := make([]string, 0, len(s.changeset))
	for k := range s.changeset {
		sortedKeys = append(sortedKeys, k)
	}
	slices.Sort(sortedKeys)

	// replay each staged write upstream; nil values become deletes
	for _, k := range sortedKeys {
		if pair := s.changeset[k]; pair.Value == nil {
			s.parent.Delete([]byte(k))
		} else {
			s.parent.Set([]byte(k), pair.Value)
		}
	}
}
// Iterator creates an iterator over the domain [start, end), which walks over
// both the KVStore's changeset, i.e. dirty writes, and the parent iterator,
// which can either be another KVStore or the SS backend, at the same time.
//
// Note, writes that happen on the KVStore over an iterator will not affect the
// iterator. This is because when an iterator is created, it takes a current
// snapshot of the changeset.
func (s *Store) Iterator(start, end []byte) store.Iterator {
	s.mu.Lock()
	defer s.mu.Unlock()

	// branched stores iterate their parent; otherwise we iterate SS directly
	if s.parent != nil {
		return s.newIterator(s.parent.Iterator(start, end), start, end, false)
	}

	parentItr, err := s.storage.Iterator(s.storeKey, s.version, start, end)
	if err != nil {
		panic(err)
	}

	return s.newIterator(parentItr, start, end, false)
}
// ReverseIterator creates a reverse iterator over the domain [start, end), which
// walks over both the KVStore's changeset, i.e. dirty writes, and the parent
// iterator, which can either be another KVStore or the SS backend, at the same
// time.
//
// Note, writes that happen on the KVStore over an iterator will not affect the
// iterator. This is because when an iterator is created, it takes a current
// snapshot of the changeset.
func (s *Store) ReverseIterator(start, end []byte) store.Iterator {
	s.mu.Lock()
	defer s.mu.Unlock()

	// branched stores iterate their parent; otherwise we iterate SS directly
	if s.parent != nil {
		return s.newIterator(s.parent.ReverseIterator(start, end), start, end, true)
	}

	parentItr, err := s.storage.ReverseIterator(s.storeKey, s.version, start, end)
	if err != nil {
		panic(err)
	}

	return s.newIterator(parentItr, start, end, true)
}
// newIterator snapshots the dirty writes falling within [start, end), orders
// them (descending when reverse), and merges them with parentItr. The caller
// must hold s.mu. The returned iterator is advanced to its first entry.
func (s *Store) newIterator(parentItr store.Iterator, start, end []byte, reverse bool) *iterator {
	startStr, endStr := string(start), string(end)

	// collect the dirty keys inside the domain; a nil bound is unbounded
	dirtyKeys := make([]string, 0, len(s.changeset))
	for key := range s.changeset {
		if (start == nil || key >= startStr) && (end == nil || key < endStr) {
			dirtyKeys = append(dirtyKeys, key)
		}
	}

	slices.Sort(dirtyKeys)
	if reverse {
		slices.Reverse(dirtyKeys)
	}

	dirtyValues := make([]store.KVPair, len(dirtyKeys))
	for i, key := range dirtyKeys {
		dirtyValues[i] = s.changeset[key]
	}

	itr := &iterator{
		parentItr: parentItr,
		start:     start,
		end:       end,
		keys:      dirtyKeys,
		values:    dirtyValues,
		reverse:   reverse,
		exhausted: !parentItr.Valid(),
	}

	// advance once so the iterator points at its first key/value entry
	_ = itr.Next()

	return itr
}

View File

@ -0,0 +1,562 @@
package branchkv_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/suite"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/branchkv"
"cosmossdk.io/store/v2/storage/sqlite"
)
// storeKey is the single store key used by every test in this suite.
const storeKey = "storeKey"

// StoreTestSuite exercises the branchkv store against a SQLite-backed SS
// engine; both fields are recreated in SetupTest before each test method.
type StoreTestSuite struct {
	suite.Suite

	storage store.VersionedDatabase // backing SS engine seeded with fixture data
	kvStore store.BranchedKVStore   // store under test
}
func TestStorageTestSuite(t *testing.T) {
suite.Run(t, &StoreTestSuite{})
}
// SetupTest runs before every test: it creates a fresh SQLite SS engine in a
// temp dir, seeds it with 100 key/value pairs at version 1, and builds a
// branchkv store on top of it.
func (s *StoreTestSuite) SetupTest() {
	storage, err := sqlite.New(s.T().TempDir())
	s.Require().NoError(err)

	cs := new(store.Changeset)
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099

		cs.AddKVPair(store.KVPair{StoreKey: storeKey, Key: []byte(key), Value: []byte(val)})
	}

	s.Require().NoError(storage.ApplyChangeset(1, cs))

	kvStore, err := branchkv.New(storeKey, storage)
	s.Require().NoError(err)

	s.storage = storage
	s.kvStore = kvStore
}
// TestGetStoreType verifies the store reports the branch store type.
func (s *StoreTestSuite) TestGetStoreType() {
	storeType := s.kvStore.GetStoreType()
	s.Require().Equal(store.StoreTypeBranch, storeType)
}
// TestGetChangeset verifies the changeset is empty before any writes and that
// both Set and Delete are recorded as dirty entries.
func (s *StoreTestSuite) TestGetChangeset() {
	// initial store with no writes should have an empty changeset
	cs := s.kvStore.GetChangeset()
	s.Require().Zero(cs.Size())

	// perform some writes
	s.kvStore.Set([]byte("key000"), []byte("updated_val000"))
	s.kvStore.Delete([]byte("key001"))

	cs = s.kvStore.GetChangeset()
	s.Require().Equal(cs.Size(), 2)
}
// TestReset verifies that Reset succeeds and leaves the changeset empty.
func (s *StoreTestSuite) TestReset() {
	s.Require().NoError(s.kvStore.Reset())

	cs := s.kvStore.GetChangeset()
	s.Require().Zero(cs.Size())
}
// TestGet verifies reads fall through to SS for clean keys, that dirty writes
// shadow SS values, and that the SS backend itself stays untouched.
func (s *StoreTestSuite) TestGet() {
	// perform read of key000, which is not dirty
	bz := s.kvStore.Get([]byte("key000"))
	s.Require().Equal([]byte("val000"), bz)

	// update key000 and perform a read which should reflect the new value
	s.kvStore.Set([]byte("key000"), []byte("updated_val000"))

	bz = s.kvStore.Get([]byte("key000"))
	s.Require().Equal([]byte("updated_val000"), bz)

	// ensure the primary SS backend is not modified
	bz, err := s.storage.Get(storeKey, 1, []byte("key000"))
	s.Require().NoError(err)
	s.Require().Equal([]byte("val000"), bz)
}
// TestHas verifies existence checks against SS-backed keys, absent keys, and
// keys that exist only as dirty writes in the changeset.
func (s *StoreTestSuite) TestHas() {
	// perform read of key000, which is not dirty thus falling back to SS
	ok := s.kvStore.Has([]byte("key000"))
	s.Require().True(ok)

	// key100 is outside the seeded fixture range (key000..key099)
	ok = s.kvStore.Has([]byte("key100"))
	s.Require().False(ok)

	// perform a write of a brand new key not in SS, but in the changeset
	s.kvStore.Set([]byte("key100"), []byte("val100"))

	ok = s.kvStore.Has([]byte("key100"))
	s.Require().True(ok)
}
// TestBranch verifies branch isolation: reads on a branch see the parent's
// dirty writes and SS data, branch writes stay isolated until Write(), and
// Write() flushes them back to the parent.
func (s *StoreTestSuite) TestBranch() {
	// perform a few writes on the original store
	s.kvStore.Set([]byte("key000"), []byte("updated_val000"))
	s.kvStore.Set([]byte("key001"), []byte("updated_val001"))

	// create a new branch
	b := s.kvStore.Branch()

	// update an existing dirty write
	b.Set([]byte("key001"), []byte("branched_updated_val001"))

	// perform reads on the branched store without writing first

	// key000 is dirty in the original store, but not in the branched store
	s.Require().Equal([]byte("updated_val000"), b.Get([]byte("key000")))

	// key001 is dirty in both the original and branched store, but branched store
	// should reflect the branched write.
	s.Require().Equal([]byte("branched_updated_val001"), b.Get([]byte("key001")))

	// key002 is not dirty in either store, so should fall back to SS
	s.Require().Equal([]byte("val002"), b.Get([]byte("key002")))

	// ensure the original store is not modified
	s.Require().Equal([]byte("updated_val001"), s.kvStore.Get([]byte("key001")))

	// only the branch's own write should appear in its changeset
	s.Require().Equal(1, b.GetChangeset().Size())
	s.Require().Equal([]byte("key001"), b.GetChangeset().Pairs[0].Key)

	// write the branched store and ensure all writes are flushed to the parent
	b.Write()
	s.Require().Equal([]byte("branched_updated_val001"), s.kvStore.Get([]byte("key001")))
	s.Require().Equal(2, s.kvStore.GetChangeset().Size())
}
// TestIterator_NoWrites iterates a store with no dirty writes, so every entry
// comes from the SS backend. It covers all four domain shapes: start-only,
// end-only, bounded, and fully open.
func (s *StoreTestSuite) TestIterator_NoWrites() {
	// iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.Iterator([]byte("key000"), nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.Iterator(nil, []byte("key100"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.Iterator([]byte("key000"), []byte("key050"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(50, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.Iterator(nil, nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}
// TestIterator_DirtyWrites iterates a store whose changeset both shadows even
// SS keys (key000..key098 updated) and extends past them (key100..key149 new),
// checking the merged stream across all four domain shapes.
func (s *StoreTestSuite) TestIterator_DirtyWrites() {
	// modify all even keys
	for i := 0; i < 100; i++ {
		if i%2 == 0 {
			key := fmt.Sprintf("key%03d", i)         // key000, key002, ...
			val := fmt.Sprintf("updated_val%03d", i) // updated_val000, updated_val002, ...

			s.kvStore.Set([]byte(key), []byte(val))
		}
	}

	// add some new keys to ensure we cover those as well
	for i := 100; i < 150; i++ {
		key := fmt.Sprintf("key%03d", i) // key100, key101, ...
		val := fmt.Sprintf("val%03d", i) // val100, val101, ...

		s.kvStore.Set([]byte(key), []byte(val))
	}

	// iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.Iterator([]byte("key000"), nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))

			// even keys below 100 were shadowed by dirty writes
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}

			i++
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.Iterator(nil, []byte("key150"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))

			// even keys below 100 were shadowed by dirty writes
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}

			i++
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.Iterator([]byte("key000"), []byte("key050"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))

			// even keys below 100 were shadowed by dirty writes
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}

			i++
			count++
		}
		s.Require().Equal(50, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.Iterator(nil, nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))

			// even keys below 100 were shadowed by dirty writes
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}

			i++
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())

		// advancing past the domain should leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}
// TestReverseIterator_NoWrites exercises ReverseIterator over the suite's
// pre-populated store (key000..key099) without any dirty writes, covering
// half-open, fully bounded, and open iteration domains.
func (s *StoreTestSuite) TestReverseIterator_NoWrites() {
	// reverse iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), nil)
		defer itr.Close()

		expected, seen := 99, 0
		for itr.Valid() {
			wantKey := []byte(fmt.Sprintf("key%03d", expected))
			wantVal := []byte(fmt.Sprintf("val%03d", expected))
			s.Require().Equal(wantKey, itr.Key(), string(itr.Key()))
			s.Require().Equal(wantVal, itr.Value())

			expected--
			seen++
			itr.Next()
		}
		s.Require().Equal(100, seen)
		s.Require().NoError(itr.Error())

		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.ReverseIterator(nil, []byte("key100"))
		defer itr.Close()

		expected, seen := 99, 0
		for itr.Valid() {
			wantKey := []byte(fmt.Sprintf("key%03d", expected))
			wantVal := []byte(fmt.Sprintf("val%03d", expected))
			s.Require().Equal(wantKey, itr.Key(), string(itr.Key()))
			s.Require().Equal(wantVal, itr.Value())

			expected--
			seen++
			itr.Next()
		}
		s.Require().Equal(100, seen)
		s.Require().NoError(itr.Error())

		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050"))
		defer itr.Close()

		expected, seen := 49, 0
		for itr.Valid() {
			wantKey := []byte(fmt.Sprintf("key%03d", expected))
			wantVal := []byte(fmt.Sprintf("val%03d", expected))
			s.Require().Equal(wantKey, itr.Key(), string(itr.Key()))
			s.Require().Equal(wantVal, itr.Value())

			expected--
			seen++
			itr.Next()
		}
		s.Require().Equal(50, seen)
		s.Require().NoError(itr.Error())

		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.ReverseIterator(nil, nil)
		defer itr.Close()

		expected, seen := 99, 0
		for itr.Valid() {
			wantKey := []byte(fmt.Sprintf("key%03d", expected))
			wantVal := []byte(fmt.Sprintf("val%03d", expected))
			s.Require().Equal(wantKey, itr.Key(), string(itr.Key()))
			s.Require().Equal(wantVal, itr.Value())

			expected--
			seen++
			itr.Next()
		}
		s.Require().Equal(100, seen)
		s.Require().NoError(itr.Error())

		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}
// TestReverseIterator_DirtyWrites exercises ReverseIterator after mutating the
// suite's pre-populated state (key000..key099): every even key below 100 is
// overwritten with an "updated_" value and fifty new keys (key100..key149) are
// added. Half-open, fully bounded, and open domains are covered.
func (s *StoreTestSuite) TestReverseIterator_DirtyWrites() {
	// modify all even keys
	for i := 0; i < 100; i++ {
		if i%2 == 0 {
			key := fmt.Sprintf("key%03d", i) // key000, key002, ...
			val := fmt.Sprintf("updated_val%03d", i) // updated_val000, updated_val002, ...
			s.kvStore.Set([]byte(key), []byte(val))
		}
	}
	// add some new keys to ensure we cover those as well
	for i := 100; i < 150; i++ {
		key := fmt.Sprintf("key%03d", i) // key100, key101, ...
		val := fmt.Sprintf("val%03d", i) // val100, val101, ...
		s.kvStore.Set([]byte(key), []byte(val))
	}
	// reverse iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), nil)
		defer itr.Close()
		i := 149
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), "itr_key: %s, count: %d", string(itr.Key()), count)
			// even keys below 100 were overwritten above; everything else is untouched
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}
			i--
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())
		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
	// reverse iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.ReverseIterator(nil, []byte("key150"))
		defer itr.Close()
		i := 149
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			// even keys below 100 were overwritten above; everything else is untouched
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}
			i--
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())
		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
	// reverse iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050"))
		defer itr.Close()
		i := 49
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			// even keys below 100 were overwritten above; everything else is untouched
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}
			i--
			count++
		}
		s.Require().Equal(50, count)
		s.Require().NoError(itr.Error())
		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
	// reverse iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.ReverseIterator(nil, nil)
		defer itr.Close()
		i := 149
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			// even keys below 100 were overwritten above; everything else is untouched
			if i%2 == 0 && i < 100 {
				s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value())
			} else {
				s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())
			}
			i--
			count++
		}
		s.Require().Equal(150, count)
		s.Require().NoError(itr.Error())
		// stepping past the end of the domain must leave the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}

View File

@ -1,49 +0,0 @@
package cache
import (
"testing"
"cosmossdk.io/store/types"
)
// freshMgr returns a CommitKVStoreCacheManager pre-seeded with two nil cache
// entries, serving as the starting state for the Reset benchmark.
func freshMgr() *CommitKVStoreCacheManager {
	mgr := &CommitKVStoreCacheManager{
		caches: make(map[string]types.CommitKVStore),
	}
	mgr.caches["a1"] = nil
	mgr.caches["alalalalalal"] = nil
	return mgr
}
// populate inserts a few representative (nil) entries into the manager's
// cache map so that Reset has something to clear.
func populate(mgr *CommitKVStoreCacheManager) {
	for _, name := range []string{
		"this one",
		"those ones are the ones",
		"very huge key right here and there are we going to ones are the ones",
	} {
		mgr.caches[name] = (types.CommitKVStore)(nil)
	}
}
// BenchmarkReset measures the cost of clearing and re-populating the
// manager's cache map via Reset.
//
// Fixes: b.ReportAllocs was called twice (before and after ResetTimer), and
// the trailing `if mgr == nil` check was dead code — mgr is assigned from
// freshMgr and cannot be nil.
func BenchmarkReset(b *testing.B) {
	mgr := freshMgr()

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mgr.Reset()
		if len(mgr.caches) != 0 {
			b.Fatal("Reset failed")
		}

		populate(mgr)
		if len(mgr.caches) == 0 {
			b.Fatal("populate failed")
		}

		mgr.Reset()
		if len(mgr.caches) != 0 {
			b.Fatal("Reset failed")
		}
	}
}

132
store/cache/cache.go vendored
View File

@ -1,132 +0,0 @@
package cache
import (
"fmt"
lru "github.com/hashicorp/golang-lru"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/types"
)
var (
	// Compile-time checks that the cache types satisfy the store interfaces.
	_ types.CommitKVStore = (*CommitKVStoreCache)(nil)
	_ types.MultiStorePersistentCache = (*CommitKVStoreCacheManager)(nil)
	// DefaultCommitKVStoreCacheSize defines the persistent ARC cache size for a
	// CommitKVStoreCache.
	DefaultCommitKVStoreCacheSize uint = 1000
)
type (
	// CommitKVStoreCache implements an inter-block (persistent) cache that wraps a
	// CommitKVStore. Reads first hit the internal ARC (Adaptive Replacement Cache).
	// During a cache miss, the read is delegated to the underlying CommitKVStore
	// and cached. Deletes and writes always happen to both the cache and the
	// CommitKVStore in a write-through manner. Caching performed in the
	// CommitKVStore and below is completely irrelevant to this layer.
	CommitKVStoreCache struct {
		types.CommitKVStore
		// cache is the fixed-size ARC holding recently read and written entries.
		cache *lru.ARCCache
	}
	// CommitKVStoreCacheManager maintains a mapping from a StoreKey to a
	// CommitKVStoreCache. Each CommitKVStore, per StoreKey, is meant to be used
	// in an inter-block (persistent) manner and typically provided by a
	// CommitMultiStore.
	CommitKVStoreCacheManager struct {
		// cacheSize is the ARC capacity used when creating per-store caches.
		cacheSize uint
		// caches maps store names to their persistent write-through caches.
		caches map[string]types.CommitKVStore
	}
)
// NewCommitKVStoreCache returns a CommitKVStoreCache wrapping the provided
// CommitKVStore with an ARC cache of the given size. It panics if the ARC
// cache cannot be constructed (e.g. a non-positive size).
//
// Fix: the panic error now wraps the underlying error with %w instead of
// flattening it with %s, so errors.Is/As work on a recovered value.
func NewCommitKVStoreCache(store types.CommitKVStore, size uint) *CommitKVStoreCache {
	cache, err := lru.NewARC(int(size))
	if err != nil {
		panic(fmt.Errorf("failed to create KVStore cache: %w", err))
	}

	return &CommitKVStoreCache{
		CommitKVStore: store,
		cache:         cache,
	}
}
// NewCommitKVStoreCacheManager returns a manager that lazily creates one
// CommitKVStoreCache of the given ARC size per StoreKey.
func NewCommitKVStoreCacheManager(size uint) *CommitKVStoreCacheManager {
	mgr := new(CommitKVStoreCacheManager)
	mgr.cacheSize = size
	mgr.caches = make(map[string]types.CommitKVStore)
	return mgr
}
// GetStoreCache returns a Cache from the CommitStoreCacheManager for a given
// StoreKey. If no Cache exists for the StoreKey, then one is created and set.
// The returned Cache is meant to be used in a persistent manner.
//
// Fix: the original performed three map lookups (check, set, return); this
// performs a single lookup and only writes on a miss.
func (cmgr *CommitKVStoreCacheManager) GetStoreCache(key types.StoreKey, store types.CommitKVStore) types.CommitKVStore {
	name := key.Name()

	// a missing entry and an explicitly-nil entry are both treated as a miss,
	// matching the original `cmgr.caches[name] == nil` behavior
	ckv, ok := cmgr.caches[name]
	if !ok || ckv == nil {
		ckv = NewCommitKVStoreCache(store, cmgr.cacheSize)
		cmgr.caches[name] = ckv
	}

	return ckv
}
// Unwrap returns the underlying CommitKVStore for the given StoreKey, or nil
// if no cache has been created for it.
func (cmgr *CommitKVStoreCacheManager) Unwrap(key types.StoreKey) types.CommitKVStore {
	ckv, ok := cmgr.caches[key.Name()]
	if !ok {
		return nil
	}
	return ckv.(*CommitKVStoreCache).CommitKVStore
}
// Reset resets the internal caches, dropping every per-store cache.
func (cmgr *CommitKVStoreCacheManager) Reset() {
	// Clear the map.
	// Please note that we are purposefully using the map clearing idiom.
	// See https://github.com/cosmos/cosmos-sdk/issues/6681.
	for key := range cmgr.caches {
		delete(cmgr.caches, key)
	}
}
// CacheWrap implements the CacheWrapper interface.
func (ckv *CommitKVStoreCache) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(ckv)
}

// Get retrieves a value by key, consulting the write-through cache first and
// falling back to (and populating from) the underlying CommitKVStore on a
// cache miss.
func (ckv *CommitKVStoreCache) Get(key []byte) []byte {
	types.AssertValidKey(key)

	keyStr := string(key)
	if cached, ok := ckv.cache.Get(keyStr); ok {
		// cache hit
		return cached.([]byte)
	}

	// cache miss; load from the underlying store and remember the result
	value := ckv.CommitKVStore.Get(key)
	ckv.cache.Add(keyStr, value)

	return value
}

// Set writes the key/value pair through to both the cache and the underlying
// CommitKVStore.
func (ckv *CommitKVStoreCache) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)

	ckv.cache.Add(string(key), value)
	ckv.CommitKVStore.Set(key, value)
}

// Delete removes the key from both the cache and the underlying
// CommitKVStore.
func (ckv *CommitKVStoreCache) Delete(key []byte) {
	ckv.cache.Remove(string(key))
	ckv.CommitKVStore.Delete(key)
}

View File

@ -1,100 +0,0 @@
package cache_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cache"
"cosmossdk.io/store/cachekv"
iavlstore "cosmossdk.io/store/iavl"
"cosmossdk.io/store/types"
)
// TestGetOrSetStoreCache verifies that GetStoreCache lazily creates a cache
// for a store key and returns the same (memoized) cache on subsequent calls.
func TestGetOrSetStoreCache(t *testing.T) {
	db := dbm.NewMemDB()
	mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)

	sKey := types.NewKVStoreKey("test")
	tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
	store := iavlstore.UnsafeNewStore(tree)
	store2 := mngr.GetStoreCache(sKey, store)

	require.NotNil(t, store2)
	require.Equal(t, store2, mngr.GetStoreCache(sKey, store))
}

// TestUnwrap verifies that Unwrap returns the underlying CommitKVStore for a
// known key and nil for an unknown key.
func TestUnwrap(t *testing.T) {
	db := dbm.NewMemDB()
	mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)

	sKey := types.NewKVStoreKey("test")
	tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
	store := iavlstore.UnsafeNewStore(tree)
	_ = mngr.GetStoreCache(sKey, store)

	require.Equal(t, store, mngr.Unwrap(sKey))
	require.Nil(t, mngr.Unwrap(types.NewKVStoreKey("test2")))
}

// TestStoreCache verifies write-through semantics: Set and Delete are
// reflected in both the cache and the underlying store, including beyond the
// ARC capacity (twice DefaultCommitKVStoreCacheSize entries are written).
//
// Fix: require.Equal takes (expected, actual); the original passed them in
// the reverse order, which only garbles failure messages but is misleading.
func TestStoreCache(t *testing.T) {
	db := dbm.NewMemDB()
	mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)

	sKey := types.NewKVStoreKey("test")
	tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
	store := iavlstore.UnsafeNewStore(tree)
	kvStore := mngr.GetStoreCache(sKey, store)

	for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ {
		key := []byte(fmt.Sprintf("key_%d", i))
		value := []byte(fmt.Sprintf("value_%d", i))

		kvStore.Set(key, value)

		res := kvStore.Get(key)
		require.Equal(t, value, res)
		require.Equal(t, value, store.Get(key))

		kvStore.Delete(key)

		require.Nil(t, kvStore.Get(key))
		require.Nil(t, store.Get(key))
	}
}

// TestReset verifies that Reset drops all managed caches and that an
// equivalent cache is recreated on the next GetStoreCache call.
func TestReset(t *testing.T) {
	db := dbm.NewMemDB()
	mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)

	sKey := types.NewKVStoreKey("test")
	tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
	store := iavlstore.UnsafeNewStore(tree)
	store2 := mngr.GetStoreCache(sKey, store)

	require.NotNil(t, store2)
	require.Equal(t, store2, mngr.GetStoreCache(sKey, store))

	// reset and check if the cache is gone
	mngr.Reset()
	require.Nil(t, mngr.Unwrap(sKey))

	// check if the cache is recreated
	require.Equal(t, store2, mngr.GetStoreCache(sKey, store))
}

// TestCacheWrap verifies that CacheWrap returns a cachekv-backed store.
func TestCacheWrap(t *testing.T) {
	db := dbm.NewMemDB()
	mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)

	sKey := types.NewKVStoreKey("test")
	tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
	store := iavlstore.UnsafeNewStore(tree)

	cacheWrapper := mngr.GetStoreCache(sKey, store).CacheWrap()
	require.IsType(t, &cachekv.Store{}, cacheWrapper)
}

View File

@ -1,140 +0,0 @@
# CacheKVStore specification
A `CacheKVStore` is cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposing of changes (e.g. after processing a failed transaction).
The core goals the CacheKVStore seeks to solve are:
* Buffer all writes to the parent store, so they can be dropped if they need to be reverted
* Allow iteration over contiguous spans of keys
* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O)
* Note: We actually fail to achieve this for iteration right now
* Note: Need to consider this getting too large and dropping some cached reads
* Make subsequent reads account for prior buffered writes
* Write all buffered changes to the parent store
We should revisit these goals with time (for instance it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status.
## Types and Structs
```go
type Store struct {
mtx sync.Mutex
cache map[string]*cValue
deleted map[string]struct{}
unsortedCache map[string]struct{}
sortedCache *dbm.MemDB // always ascending sorted
parent types.KVStore
}
```
The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. Mutex is used as IAVL trees (the `KVStore` in application) are not safe for concurrent use.
### `cache`
The main mapping of key-value pairs stored in cache. This map contains both keys that are cached from read operations as well as dirty keys which map to a value that is potentially different than what is in the underlying `KVStore`.
Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`.
```go
type cValue struct {
value []byte
dirty bool
}
```
### `deleted`
Keys that are to be deleted from `parent` are recorded in the `deleted` map. Each key is mapped to an empty struct to implement a set.
### `unsortedCache`
Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set.
### `sortedCache`
A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order.
## CRUD Operations and Writing
The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it).
`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted.
### `Get`
`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`.
### `Has`
`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read.
### `Set`
New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`.
Calls `setCacheValue()` with `deleted=false` and `dirty=true`.
### `Delete`
A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`.
Calls `setCacheValue()` with `deleted=true` and `dirty=true`.
### `Write`
Key-value pairs in the cache are written to `parent` in ascending order of their keys.
A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`.
If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache.
## Iteration
Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache.
In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching).
[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache.
### Implementation Overview
Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator.
Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested.
Generating the cache iterator can be decomposed into four parts:
1. Finding all keys that exist in the range we are iterating over
2. Sorting this list of keys
3. Inserting these keys into `sortedCache` and removing them from `unsortedCache`
4. Returning an iterator over `sortedCache` with the desired range
Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows.
If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys.
```go
n := len(store.unsortedCache)
unsorted := make([]*kv.Pair, 0)
if n < minSortSize {
for key := range store.unsortedCache {
if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
cacheValue := store.cache[key]
unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
}
}
store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
return
}
```
Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`.
At this point, part 3. is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`).
In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota state identifier (marking the slice as already sorted) is passed so that the function can skip its sorting step.
Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`.
As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls.

View File

@ -1,44 +0,0 @@
package cachekv_test
import "crypto/rand"
// randSlice returns a byte slice of the given size filled with
// cryptographically random bytes.
func randSlice(sliceSize int) []byte {
	out := make([]byte, sliceSize)
	_, _ = rand.Read(out)
	return out
}
// incrementByteSlice treats bz as a big-endian counter and adds one in place,
// carrying into higher-order bytes; an all-0xFF slice wraps around to zero.
func incrementByteSlice(bz []byte) {
	for i := len(bz) - 1; i >= 0; i-- {
		if bz[i] == 255 {
			bz[i] = 0
			continue
		}
		bz[i]++
		return
	}
}
// generateSequentialKeys returns numKeys keys in ascending order, starting at
// startKey and advancing it as a big-endian counter (the increment logic is
// inlined here; it carries from the least-significant byte and wraps 0xFF to 0).
func generateSequentialKeys(startKey []byte, numKeys int) [][]byte {
	keys := make([][]byte, 0, numKeys)
	cur := append([]byte(nil), startKey...)

	for i := 0; i < numKeys; i++ {
		keys = append(keys, append([]byte(nil), cur...))

		// advance cur by one
		for j := len(cur) - 1; j >= 0; j-- {
			if cur[j] < 255 {
				cur[j]++
				break
			}
			cur[j] = 0
		}
	}

	return keys
}
// generateRandomKeys returns numKeys random, unsorted keys of keySize bytes
// each (random generation inlined rather than delegating to a helper).
func generateRandomKeys(keySize, numKeys int) [][]byte {
	keys := make([][]byte, 0, numKeys)
	for i := 0; i < numKeys; i++ {
		k := make([]byte, keySize)
		_, _ = rand.Read(k)
		keys = append(keys, k)
	}
	return keys
}

View File

@ -1,136 +0,0 @@
package cachekv_test
import (
fmt "fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/types"
)
// DoBenchmarkDeepCacheStack benchmarks a full iteration over a store wrapped
// in `depth` nested cachekv layers; each layer overwrites key i so every
// layer contributes a dirty entry to the merged view.
func DoBenchmarkDeepCacheStack(b *testing.B, depth int) {
	b.Helper()
	db := dbm.NewMemDB()
	initialStore := cachekv.NewStore(dbadapter.Store{DB: db})
	// seed 20 keys (hello000..hello019) in the base store
	nItems := 20
	for i := 0; i < nItems; i++ {
		initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0})
	}
	var stack CacheStack
	stack.Reset(initialStore)
	// push `depth` snapshots, dirtying one key per layer
	for i := 0; i < depth; i++ {
		stack.Snapshot()
		store := stack.CurrentStore()
		store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)})
	}
	store := stack.CurrentStore()
	b.ResetTimer()
	// measured: a full scan through the merged view of all layers
	for i := 0; i < b.N; i++ {
		it := store.Iterator(nil, nil)
		items := make([][]byte, 0, nItems)
		for ; it.Valid(); it.Next() {
			items = append(items, it.Key())
			it.Value()
		}
		it.Close()
		require.Equal(b, nItems, len(items))
	}
}
// Benchmark wrappers exercising cache stacks of increasing depth.
func BenchmarkDeepCacheStack1(b *testing.B) {
	DoBenchmarkDeepCacheStack(b, 1)
}
func BenchmarkDeepCacheStack3(b *testing.B) {
	DoBenchmarkDeepCacheStack(b, 3)
}
func BenchmarkDeepCacheStack10(b *testing.B) {
	DoBenchmarkDeepCacheStack(b, 10)
}
func BenchmarkDeepCacheStack13(b *testing.B) {
	DoBenchmarkDeepCacheStack(b, 13)
}
// CacheStack manages a stack of nested cache stores to
// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods.
type CacheStack struct {
	// initialStore holds the state before transaction execution; it is the
	// store used by `StateDB.CommitedState`.
	initialStore types.CacheKVStore
	// cacheStores is the stack of nested cache-wrapped stores; the last
	// element is the current (topmost) store.
	cacheStores []types.CacheKVStore
}
// CurrentStore returns the top store of the cached stack;
// if the stack is empty, it returns the initial store.
func (cs *CacheStack) CurrentStore() types.CacheKVStore {
	l := len(cs.cacheStores)
	if l == 0 {
		return cs.initialStore
	}
	return cs.cacheStores[l-1]
}
// Reset sets the initial store and clears the cache store stack.
func (cs *CacheStack) Reset(initialStore types.CacheKVStore) {
	cs.initialStore = initialStore
	cs.cacheStores = nil
}
// IsEmpty returns true if the cache store stack is empty.
func (cs *CacheStack) IsEmpty() bool {
	return len(cs.cacheStores) == 0
}
// Commit writes all the cached stores from top to bottom, in order, and then
// clears the stack.
func (cs *CacheStack) Commit() {
	// commit in order from top to bottom
	for i := len(cs.cacheStores) - 1; i >= 0; i-- {
		cs.cacheStores[i].Write()
	}
	cs.cacheStores = nil
}
// CommitToRevision commits every store above the target revision (exclusive),
// keeping revisions [0..target] on the stack, to improve the efficiency of
// db operations.
func (cs *CacheStack) CommitToRevision(target int) error {
	if target < 0 || target >= len(cs.cacheStores) {
		return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))
	}
	// commit in order from top to bottom
	for i := len(cs.cacheStores) - 1; i > target; i-- {
		cs.cacheStores[i].Write()
	}
	cs.cacheStores = cs.cacheStores[0 : target+1]
	return nil
}
// Snapshot pushes a new cache-wrapped store onto the stack and returns its
// index, which can later be passed to RevertToSnapshot or CommitToRevision.
func (cs *CacheStack) Snapshot() int {
	cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore()))
	return len(cs.cacheStores) - 1
}
// RevertToSnapshot discards the target store and everything above it
// (indices >= target). The target should be a snapshot index returned by
// `Snapshot`. This function panics if the index is out of bounds.
func (cs *CacheStack) RevertToSnapshot(target int) {
	if target < 0 || target >= len(cs.cacheStores) {
		panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)))
	}
	cs.cacheStores = cs.cacheStores[:target]
}

View File

@ -1,91 +0,0 @@
package internal
import (
"bytes"
"errors"
"github.com/tidwall/btree"
"cosmossdk.io/store/types"
)
const (
	// bTreeDegree is the approximate number of items and children per B-tree
	// node. Tuned with benchmarks. Copied from memdb.
	bTreeDegree = 32
)
// errKeyEmpty is returned when an iteration bound is explicitly empty
// (non-nil but zero length); a nil bound means unbounded.
var errKeyEmpty = errors.New("key cannot be empty")
// BTree implements the sorted cache for cachekv store,
// we don't use MemDB here because cachekv is used extensively in sdk core path,
// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests.
//
// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly.
type BTree struct {
	tree *btree.BTreeG[item]
}
// NewBTree creates a wrapper around `btree.BTreeG` keyed by byte slices.
func NewBTree() BTree {
	opts := btree.Options{
		Degree:  bTreeDegree,
		NoLocks: false,
	}
	return BTree{tree: btree.NewBTreeGOptions(byKeys, opts)}
}
// Set stores the key/value pair, replacing any existing entry for key.
func (bt BTree) Set(key, value []byte) {
	bt.tree.Set(newItem(key, value))
}

// Get returns the value stored under key, or nil if the key is absent.
func (bt BTree) Get(key []byte) []byte {
	entry, found := bt.tree.Get(newItem(key, nil))
	if !found {
		return nil
	}
	return entry.value
}

// Delete removes the entry for key, if any.
func (bt BTree) Delete(key []byte) {
	bt.tree.Delete(newItem(key, nil))
}

// validateIterBounds rejects explicitly empty (non-nil, zero-length)
// iteration bounds; nil bounds are allowed and mean unbounded.
func validateIterBounds(start, end []byte) error {
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return errKeyEmpty
	}
	return nil
}

// Iterator returns an ascending iterator over [start, end).
func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) {
	if err := validateIterBounds(start, end); err != nil {
		return nil, err
	}
	return newMemIterator(start, end, bt, true), nil
}

// ReverseIterator returns a descending iterator over [start, end).
func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) {
	if err := validateIterBounds(start, end); err != nil {
		return nil, err
	}
	return newMemIterator(start, end, bt, false), nil
}

// Copy the tree. This is a copy-on-write operation and is very fast because
// it only performs a shadowed copy.
func (bt BTree) Copy() BTree {
	return BTree{tree: bt.tree.Copy()}
}
// item is a btree item with byte slices as keys and values.
type item struct {
	key   []byte
	value []byte
}

// byKeys reports whether a sorts strictly before b, comparing keys
// lexicographically.
//
// Fix: compare against zero (`< 0`) instead of `== -1`; bytes.Compare only
// documents the sign of its result, not the exact value.
func byKeys(a, b item) bool {
	return bytes.Compare(a.key, b.key) < 0
}

// newItem creates a new pair item.
func newItem(key, value []byte) item {
	return item{key: key, value: value}
}

View File

@ -1,204 +0,0 @@
package internal
import (
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/types"
)
// TestGetSetDelete covers the basic BTree CRUD operations: missing keys read
// as nil, Set/Get round-trips, and Delete on both present and absent keys.
func TestGetSetDelete(t *testing.T) {
	db := NewBTree()
	// A nonexistent key should return nil.
	value := db.Get([]byte("a"))
	require.Nil(t, value)
	// Set and get a value.
	db.Set([]byte("a"), []byte{0x01})
	db.Set([]byte("b"), []byte{0x02})
	value = db.Get([]byte("a"))
	require.Equal(t, []byte{0x01}, value)
	value = db.Get([]byte("b"))
	require.Equal(t, []byte{0x02}, value)
	// Deleting a non-existent value is fine.
	db.Delete([]byte("x"))
	// Delete a value.
	db.Delete([]byte("a"))
	value = db.Get([]byte("a"))
	require.Nil(t, value)
	db.Delete([]byte("b"))
	value = db.Get([]byte("b"))
	require.Nil(t, value)
}
// TestDBIterator exercises forward and reverse iteration over a tree holding
// keys 0..9 (6 deliberately absent), covering nil/blank bounds, sub-ranges,
// empty ranges, inverted bounds, and the empty-tree case.
func TestDBIterator(t *testing.T) {
	db := NewBTree()
	for i := 0; i < 10; i++ {
		if i != 6 { // but skip 6.
			db.Set(int642Bytes(int64(i)), []byte{})
		}
	}

	// Blank iterator keys should error
	_, err := db.ReverseIterator([]byte{}, nil)
	require.Equal(t, errKeyEmpty, err)
	_, err = db.ReverseIterator(nil, []byte{})
	require.Equal(t, errKeyEmpty, err)

	itr, err := db.Iterator(nil, nil)
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")

	ritr, err := db.ReverseIterator(nil, nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")

	// end is exclusive, so iterating up to the smallest key sees nothing.
	itr, err = db.Iterator(nil, int642Bytes(0))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64(nil), "forward iterator to 0")

	// start past the largest key yields an empty reverse iterator.
	ritr, err = db.ReverseIterator(int642Bytes(10), nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)")

	itr, err = db.Iterator(int642Bytes(0), nil)
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")

	itr, err = db.Iterator(int642Bytes(1), nil)
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")

	ritr, err = db.ReverseIterator(nil, int642Bytes(10))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)")

	ritr, err = db.ReverseIterator(nil, int642Bytes(9))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)")

	ritr, err = db.ReverseIterator(nil, int642Bytes(8))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)")

	// Sub-ranges around the missing key 6.
	itr, err = db.Iterator(int642Bytes(5), int642Bytes(6))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6")

	itr, err = db.Iterator(int642Bytes(5), int642Bytes(7))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7")

	itr, err = db.Iterator(int642Bytes(5), int642Bytes(8))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8")

	itr, err = db.Iterator(int642Bytes(6), int642Bytes(7))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7")

	itr, err = db.Iterator(int642Bytes(6), int642Bytes(8))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8")

	itr, err = db.Iterator(int642Bytes(7), int642Bytes(8))
	require.NoError(t, err)
	verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8")

	ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5))
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4")

	ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{5, 4}, "reverse iterator from 6 (ex) to 4")

	ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{5, 4}, "reverse iterator from 7 (ex) to 4")

	ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6))
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5")

	ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7))
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5")

	ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64(nil), "reverse iterator from 7 (ex) to 6")

	ritr, err = db.ReverseIterator(int642Bytes(10), nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10")

	ritr, err = db.ReverseIterator(int642Bytes(6), nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6")

	ritr, err = db.ReverseIterator(int642Bytes(5), nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5")

	ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9))
	require.NoError(t, err)
	verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8")

	ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64{3, 2}, "reverse iterator from 4 (ex) to 2")

	// Inverted bounds (start > end) yield an empty iterator, not an error.
	ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2))
	require.NoError(t, err)
	verifyIterator(t, ritr,
		[]int64(nil), "reverse iterator from 2 (ex) to 4")

	// Ensure that the iterators don't panic with an empty database.
	db2 := NewBTree()

	itr, err = db2.Iterator(nil, nil)
	require.NoError(t, err)
	verifyIterator(t, itr, nil, "forward iterator with empty db")

	ritr, err = db2.ReverseIterator(nil, nil)
	require.NoError(t, err)
	verifyIterator(t, ritr, nil, "reverse iterator with empty db")
}
// verifyIterator drains itr and asserts that it produces exactly the keys in
// expected (decoded as int64), in order, then closes it. msg identifies the
// failing case in test output.
//
// Fixes vs the previous version: require.Equal takes (expected, actual) in
// that order, so the final length check had its arguments swapped; msg was
// accepted but never used in any assertion message; and an iterator yielding
// more items than expected would panic with an index-out-of-range instead of
// failing cleanly.
func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) {
	t.Helper()
	i := 0
	for itr.Valid() {
		key := itr.Key()
		// Guard against over-production before indexing into expected.
		require.Less(t, i, len(expected), "%s: iterator yielded more items than expected", msg)
		require.Equal(t, expected[i], bytes2Int64(key), "%s: item %d mismatches", msg, i)
		itr.Next()
		i++
	}
	require.Equal(t, len(expected), i, "%s: expected to have fully iterated over all the elements in iter", msg)
	require.NoError(t, itr.Close())
}
// int642Bytes encodes i as an 8-byte big-endian key so that lexicographic
// byte order matches numeric order for the test fixtures.
func int642Bytes(i int64) []byte {
	return types.Uint64ToBigEndian(uint64(i))
}

// bytes2Int64 decodes an 8-byte big-endian key produced by int642Bytes.
func bytes2Int64(buf []byte) int64 {
	return int64(types.BigEndianToUint64(buf))
}

View File

@ -1,120 +0,0 @@
package internal
import (
"bytes"
"errors"
"github.com/tidwall/btree"
"cosmossdk.io/store/types"
)
var _ types.Iterator = (*memIterator)(nil)

// memIterator iterates over iterKVCache items.
// if value is nil, means it was deleted.
// Implements Iterator.
type memIterator struct {
	iter btree.IterG[item] // cursor into the copy-on-write btree

	start     []byte // inclusive lower bound; nil means unbounded
	end       []byte // exclusive upper bound; nil means unbounded
	ascending bool   // iteration direction
	valid     bool   // whether the cursor currently points at an in-range item
}
// newMemIterator positions a cursor over items in the half-open range
// [start, end), ascending or descending. Nil bounds mean unbounded on that
// side; the returned iterator may start out invalid if the range is empty.
func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator {
	iter := items.tree.Iter()
	var valid bool
	if ascending {
		if start != nil {
			// Seek lands on the first key >= start (start is inclusive).
			valid = iter.Seek(newItem(start, nil))
		} else {
			valid = iter.First()
		}
	} else {
		if end != nil {
			// Seek lands on the first key >= end, if any.
			valid = iter.Seek(newItem(end, nil))
			if !valid {
				// Every key is < end: descend from the last item.
				valid = iter.Last()
			} else {
				// end is exclusive
				valid = iter.Prev()
			}
		} else {
			valid = iter.Last()
		}
	}

	mi := &memIterator{
		iter:      iter,
		start:     start,
		end:       end,
		ascending: ascending,
		valid:     valid,
	}

	// The seek above only honored one bound; reject the initial position if
	// it violates the other side of the range.
	if mi.valid {
		mi.valid = mi.keyInRange(mi.Key())
	}

	return mi
}
// Domain reports the [start, end) bounds the iterator was created with.
func (mi *memIterator) Domain() ([]byte, []byte) {
	return mi.start, mi.end
}

// Close releases the underlying btree cursor.
func (mi *memIterator) Close() error {
	mi.iter.Release()
	return nil
}

// Error reports a non-nil error exactly when the iterator is invalid.
func (mi *memIterator) Error() error {
	if mi.Valid() {
		return nil
	}
	return errors.New("invalid memIterator")
}

// Valid reports whether the iterator currently points at an in-range item.
func (mi *memIterator) Valid() bool {
	return mi.valid
}

// Next advances the cursor one step in the iteration direction, invalidating
// the iterator once it steps past its bound or off the tree.
func (mi *memIterator) Next() {
	mi.assertValid()

	if mi.ascending {
		mi.valid = mi.iter.Next()
	} else {
		mi.valid = mi.iter.Prev()
	}
	if mi.valid {
		mi.valid = mi.keyInRange(mi.Key())
	}
}

// keyInRange reports whether key still lies inside the iterator's domain:
// strictly below the exclusive end bound when ascending, and at or above the
// inclusive start bound when descending.
func (mi *memIterator) keyInRange(key []byte) bool {
	switch {
	case mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0:
		return false
	case !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0:
		return false
	default:
		return true
	}
}

// Key returns the key under the cursor.
func (mi *memIterator) Key() []byte {
	return mi.iter.Item().key
}

// Value returns the value under the cursor (nil marks a deletion).
func (mi *memIterator) Value() []byte {
	return mi.iter.Item().value
}

// assertValid panics when the iterator is no longer valid.
func (mi *memIterator) assertValid() {
	if err := mi.Error(); err != nil {
		panic(err)
	}
}

View File

@ -1,235 +0,0 @@
package internal
import (
"bytes"
"errors"
"cosmossdk.io/store/types"
)
// cacheMergeIterator merges a parent Iterator and a cache Iterator.
// The cache iterator may return nil keys to signal that an item
// had been deleted (but not deleted in the parent).
// If the cache iterator has the same key as the parent, the
// cache shadows (overrides) the parent.
//
// TODO: Optimize by memoizing.
type cacheMergeIterator struct {
	parent    types.Iterator // committed state underneath the cache
	cache     types.Iterator // dirty cache entries; nil value = delete marker
	ascending bool           // iteration direction shared by both sides
	valid     bool           // whether the merged view points at an existing item
}
var _ types.Iterator = (*cacheMergeIterator)(nil)

// NewCacheMergeIterator wires a parent and a cache iterator together and
// positions the merged view on the first existing (non-deleted) entry.
func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator {
	it := &cacheMergeIterator{
		parent:    parent,
		cache:     cache,
		ascending: ascending,
	}
	it.valid = it.skipUntilExistsOrInvalid()
	return it
}
// Domain implements Iterator. The cache mirrors the parent's domain, so the
// parent's bounds are returned for both.
func (iter *cacheMergeIterator) Domain() ([]byte, []byte) {
	return iter.parent.Domain()
}

// Valid implements Iterator.
func (iter *cacheMergeIterator) Valid() bool {
	return iter.valid
}
// Next implements Iterator. It advances whichever underlying iterator(s)
// currently hold the front key — both when they tie — and then fast-forwards
// past any deleted entries.
func (iter *cacheMergeIterator) Next() {
	iter.assertValid()

	if !iter.parent.Valid() {
		// Parent exhausted: only the cache can move.
		iter.cache.Next()
	} else if !iter.cache.Valid() {
		// Cache exhausted: only the parent can move.
		iter.parent.Next()
	} else {
		// Both valid: advance the side(s) holding the current key.
		cmp := iter.compare(iter.parent.Key(), iter.cache.Key())
		if cmp <= 0 { // parent <= cache
			iter.parent.Next()
		}
		if cmp >= 0 { // cache <= parent
			iter.cache.Next()
		}
	}
	iter.valid = iter.skipUntilExistsOrInvalid()
}
// Key implements Iterator. When both sides are valid, the front key wins
// per the iteration order; on a tie both hold the same key.
func (iter *cacheMergeIterator) Key() []byte {
	iter.assertValid()

	// Only one side valid: return its key.
	if !iter.parent.Valid() {
		return iter.cache.Key()
	}
	if !iter.cache.Valid() {
		return iter.parent.Key()
	}

	// Both valid: the smaller key (larger when descending) is the front key;
	// on equality they are identical, so the parent's copy is fine.
	keyP, keyC := iter.parent.Key(), iter.cache.Key()
	if iter.compare(keyP, keyC) <= 0 {
		return keyP
	}
	return keyC
}

// Value implements Iterator. On a key tie the cache shadows the parent.
func (iter *cacheMergeIterator) Value() []byte {
	iter.assertValid()

	// Only one side valid: return its value.
	if !iter.parent.Valid() {
		return iter.cache.Value()
	}
	if !iter.cache.Valid() {
		return iter.parent.Value()
	}

	// Both valid: the parent only wins when it holds a strictly smaller key;
	// on equality the cache overrides the parent.
	if iter.compare(iter.parent.Key(), iter.cache.Key()) < 0 {
		return iter.parent.Value()
	}
	return iter.cache.Value()
}
// Close implements Iterator. Both underlying iterators are always closed;
// a parent error takes precedence over a cache error.
func (iter *cacheMergeIterator) Close() error {
	cacheErr := iter.cache.Close()
	parentErr := iter.parent.Close()
	if parentErr != nil {
		return parentErr
	}
	return cacheErr
}

// Error returns an error if the cacheMergeIterator is invalid defined by the
// Valid method.
func (iter *cacheMergeIterator) Error() error {
	if iter.Valid() {
		return nil
	}
	return errors.New("invalid cacheMergeIterator")
}

// assertValid panics when the iterator is invalid.
// NOTE: May have side-effect of iterating over cache.
func (iter *cacheMergeIterator) assertValid() {
	if err := iter.Error(); err != nil {
		panic(err)
	}
}

// compare orders keys like bytes.Compare, with the sign flipped when the
// iterator is descending.
func (iter *cacheMergeIterator) compare(a, b []byte) int {
	if iter.ascending {
		return bytes.Compare(a, b)
	}
	return -bytes.Compare(a, b)
}
// skipCacheDeletes fast-forwards the cache past delete markers (nil values)
// whose keys precede `until`. Afterwards the cache is either invalid, on a
// non-delete item, or on a key at/after `until`. A nil `until` means no
// limit, so the cache may end up invalid.
// CONTRACT: cache is valid.
func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) {
	for iter.cache.Valid() && iter.cache.Value() == nil {
		if until != nil && iter.compare(iter.cache.Key(), until) >= 0 {
			return
		}
		iter.cache.Next()
	}
}
// Fast forwards cache (or parent+cache in case of deleted items) until current
// item exists (i.e. is not shadowed by a cache delete marker), or until
// iterator becomes invalid.
// Returns whether the iterator is valid.
func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool {
	for {
		// If parent is invalid, fast-forward cache.
		if !iter.parent.Valid() {
			iter.skipCacheDeletes(nil)
			return iter.cache.Valid()
		}
		// Parent is valid.
		if !iter.cache.Valid() {
			return true
		}
		// Parent is valid, cache is valid.
		// Compare parent and cache.
		keyP := iter.parent.Key()
		keyC := iter.cache.Key()

		switch iter.compare(keyP, keyC) {
		case -1: // parent < cache.
			return true

		case 0: // parent == cache.
			// Skip over if cache item is a delete.
			valueC := iter.cache.Value()
			if valueC == nil {
				// The delete shadows the parent entry: skip both sides.
				iter.parent.Next()
				iter.cache.Next()

				continue
			}
			// Cache is not a delete.
			return true // cache exists.
		case 1: // cache < parent
			// Skip over if cache item is a delete.
			valueC := iter.cache.Value()
			if valueC == nil {
				// Deletes with no parent counterpart are skipped only up to
				// the parent's current key, which stays the candidate front.
				iter.skipCacheDeletes(keyP)
				continue
			}
			// Cache is not a delete.
			return true // cache exists.
		}
	}
}

View File

@ -1,44 +0,0 @@
package cachekv
import (
"strconv"
"testing"
"cosmossdk.io/store/cachekv/internal"
)
// BenchmarkLargeUnsortedMisses measures dirtyItems on a store whose unsorted
// cache is large while the queried range matches nothing.
func BenchmarkLargeUnsortedMisses(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		store := generateStore()
		b.StartTimer()

		// Keys are all A* or Z*, so [B1, B2) is inside the key domain but
		// matches no entries.
		for j := 0; j < 10000; j++ {
			store.dirtyItems([]byte("B1"), []byte("B2"))
		}
	}
}
// generateStore builds a store with 10k dirty entries, half keyed A0..A4999
// and half Z0..Z4999, leaving a large untouched gap in the key space.
func generateStore() *Store {
	const half = 5000
	cache := make(map[string]*cValue, 2*half)
	unsorted := make(map[string]struct{}, 2*half)
	for _, prefix := range []string{"A", "Z"} {
		for i := 0; i < half; i++ {
			key := prefix + strconv.Itoa(i)
			unsorted[key] = struct{}{}
			cache[key] = &cValue{}
		}
	}

	return &Store{
		cache:         cache,
		unsortedCache: unsorted,
		sortedCache:   internal.NewBTree(),
	}
}

View File

@ -1,141 +0,0 @@
package cachekv
import "testing"
// TestFindStartIndex exercises findStartIndex, which returns the index of
// the first element >= query in an ascending-sorted slice, or -1 when every
// element is smaller than the query.
func TestFindStartIndex(t *testing.T) {
	tests := []struct {
		name    string
		sortedL []string
		query   string
		want    int
	}{
		{
			name:    "non-existent value",
			sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "o",
			want:    8,
		},
		{
			name:    "dupes start at index 0",
			sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "a",
			want:    0,
		},
		{
			name:    "dupes start at non-index 0",
			sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "c",
			want:    1,
		},
		{
			name:    "at end",
			sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"},
			query:   "z",
			want:    7,
		},
		{
			name:    "dupes at end",
			sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"},
			query:   "z",
			want:    7,
		},
		{
			name:    "entirely dupes",
			sortedL: []string{"z", "z", "z", "z", "z"},
			query:   "z",
			want:    0,
		},
		{
			name:    "non-existent but within >=start",
			sortedL: []string{"z", "z", "z", "z", "z"},
			query:   "p",
			want:    0,
		},
		{
			name:    "non-existent and out of range",
			sortedL: []string{"d", "e", "f", "g", "h"},
			query:   "z",
			want:    -1,
		},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			body := tt.sortedL
			got := findStartIndex(body, tt.query)
			if got != tt.want {
				t.Fatalf("Got: %d, want: %d", got, tt.want)
			}
		})
	}
}
// TestFindEndIndex exercises findEndIndex: when the query is present it
// returns the index of its first occurrence; otherwise it returns the index
// of the last element smaller than the query, or -1 if none exists.
func TestFindEndIndex(t *testing.T) {
	tests := []struct {
		name    string
		sortedL []string
		query   string
		want    int
	}{
		{
			name:    "non-existent value",
			sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "o",
			want:    7,
		},
		{
			name:    "dupes start at index 0",
			sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "a",
			want:    0,
		},
		{
			name:    "dupes start at non-index 0",
			sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
			query:   "c",
			want:    1,
		},
		{
			name:    "at end",
			sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"},
			query:   "z",
			want:    7,
		},
		{
			name:    "dupes at end",
			sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"},
			query:   "z",
			want:    7,
		},
		{
			name:    "entirely dupes",
			sortedL: []string{"z", "z", "z", "z", "z"},
			query:   "z",
			want:    0,
		},
		{
			// Renamed from "non-existent and out of range", which duplicated
			// the next case's name and made t.Run subtest output ambiguous.
			name:    "non-existent and below range",
			sortedL: []string{"z", "z", "z", "z", "z"},
			query:   "p",
			want:    -1,
		},
		{
			name:    "non-existent and out of range",
			sortedL: []string{"d", "e", "f", "g", "h"},
			query:   "z",
			want:    4,
		},
	}

	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			body := tt.sortedL
			got := findEndIndex(body, tt.query)
			if got != tt.want {
				t.Fatalf("Got: %d, want: %d", got, tt.want)
			}
		})
	}
}

View File

@ -1,408 +0,0 @@
package cachekv
import (
"bytes"
"io"
"sort"
"sync"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/math"
"cosmossdk.io/store/cachekv/internal"
"cosmossdk.io/store/internal/conv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// cValue represents a cached value.
// If dirty is true, it indicates the cached value is different from the underlying value.
type cValue struct {
	value []byte // cached value; nil records a deletion
	dirty bool   // set on writes through this store, cleared only by cache reset
}

// Store wraps an in-memory cache around an underlying types.KVStore.
type Store struct {
	mtx           sync.Mutex
	cache         map[string]*cValue  // every key read or written through this store
	unsortedCache map[string]struct{} // dirty keys not yet folded into sortedCache
	sortedCache   internal.BTree      // always ascending sorted
	parent        types.KVStore
}
var _ types.CacheKVStore = (*Store)(nil)

// NewStore wraps parent in a fresh, empty write-back cache layer.
func NewStore(parent types.KVStore) *Store {
	s := &Store{
		parent:        parent,
		cache:         map[string]*cValue{},
		unsortedCache: map[string]struct{}{},
		sortedCache:   internal.NewBTree(),
	}
	return s
}
// GetStoreType implements Store; the cache reports its parent's type.
func (store *Store) GetStoreType() types.StoreType {
	return store.parent.GetStoreType()
}

// Get implements types.KVStore. A cache miss is read through from the parent
// and memoized as a clean (non-dirty) entry.
func (store *Store) Get(key []byte) []byte {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	types.AssertValidKey(key)

	if entry, ok := store.cache[conv.UnsafeBytesToStr(key)]; ok {
		return entry.value
	}
	value := store.parent.Get(key)
	store.setCacheValue(key, value, false)
	return value
}

// Set implements types.KVStore, recording the write as a dirty cache entry.
func (store *Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)

	store.mtx.Lock()
	defer store.mtx.Unlock()
	store.setCacheValue(key, value, true)
}

// Has implements types.KVStore.
func (store *Store) Has(key []byte) bool {
	return store.Get(key) != nil
}

// Delete implements types.KVStore. A deletion is recorded as a dirty entry
// holding a nil value.
func (store *Store) Delete(key []byte) {
	types.AssertValidKey(key)

	store.mtx.Lock()
	defer store.mtx.Unlock()
	store.setCacheValue(key, nil, true)
}
// resetCaches empties cache, unsortedCache, and sortedCache after a Write.
func (store *Store) resetCaches() {
	if len(store.cache) <= 100_000 {
		// Small caches are cleared in place via the map-clearing idiom,
		// keeping the already-allocated buckets for reuse.
		// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
		for key := range store.cache {
			delete(store.cache, key)
		}
		for key := range store.unsortedCache {
			delete(store.unsortedCache, key)
		}
	} else {
		// Cache is too large. We likely did something linear time
		// (e.g. Epoch block, Genesis block, etc). Free the old caches from
		// memory, and let them get re-allocated.
		// TODO: In a future CacheKV redesign, such linear workloads should
		// get into a different cache instantiation.
		// 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis
		// RAM problem.
		store.cache = make(map[string]*cValue)
		store.unsortedCache = make(map[string]struct{})
	}
	store.sortedCache = internal.NewBTree()
}
// Implements Cachetypes.KVStore. Write flushes every dirty entry to the
// parent store in ascending key order (deletes included) and resets all
// three caches.
func (store *Store) Write() {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	if len(store.cache) == 0 && len(store.unsortedCache) == 0 {
		store.sortedCache = internal.NewBTree()
		return
	}

	type cEntry struct {
		key string
		val *cValue
	}

	// We need a copy of all of the keys.
	// Not the best. To reduce RAM pressure, we copy the values as well
	// and clear out the old caches right after the copy.
	sortedCache := make([]cEntry, 0, len(store.cache))

	for key, dbValue := range store.cache {
		if dbValue.dirty {
			sortedCache = append(sortedCache, cEntry{key, dbValue})
		}
	}
	// Caches are reset before the sort/flush; the copy above still holds
	// pointers to the values, so nothing is lost.
	store.resetCaches()
	sort.Slice(sortedCache, func(i, j int) bool {
		return sortedCache[i].key < sortedCache[j].key
	})

	// TODO: Consider allowing usage of Batch, which would allow the write to
	// at least happen atomically.
	for _, obj := range sortedCache {
		// We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot
		// be sure if the underlying store might do a save with the byteslice or
		// not. Once we get confirmation that .Delete is guaranteed not to
		// save the byteslice, then we can assume only a read-only copy is sufficient.
		if obj.val.value != nil {
			// It already exists in the parent, hence update it.
			store.parent.Set([]byte(obj.key), obj.val.value)
		} else {
			// A nil cached value records a deletion.
			store.parent.Delete([]byte(obj.key))
		}
	}
}
// CacheWrap implements CacheWrapper, layering a fresh cachekv store on top
// of this one.
func (store *Store) CacheWrap() types.CacheWrap {
	return NewStore(store)
}

// CacheWrapWithTrace implements the CacheWrapper interface, inserting a
// tracing store between the new cache layer and this one.
func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return NewStore(tracekv.NewStore(store, w, tc))
}

//----------------------------------------
// Iteration

// Iterator implements types.KVStore.
func (store *Store) Iterator(start, end []byte) types.Iterator {
	return store.iterator(start, end, true)
}

// ReverseIterator implements types.KVStore.
func (store *Store) ReverseIterator(start, end []byte) types.Iterator {
	return store.iterator(start, end, false)
}
// iterator returns a merged view over the parent store and the dirty cache
// for the range [start, end), in the requested direction.
func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator {
	store.mtx.Lock()
	defer store.mtx.Unlock()

	// Fold the relevant unsorted writes into sortedCache, then iterate a
	// copy-on-write snapshot so later writes don't disturb this iterator.
	store.dirtyItems(start, end)
	snapshot := store.sortedCache.Copy()

	var parent, cache types.Iterator
	var err error
	if ascending {
		parent = store.parent.Iterator(start, end)
		cache, err = snapshot.Iterator(start, end)
	} else {
		parent = store.parent.ReverseIterator(start, end)
		cache, err = snapshot.ReverseIterator(start, end)
	}
	if err != nil {
		panic(err)
	}

	return internal.NewCacheMergeIterator(parent, cache, ascending)
}
// findStartIndex returns the index of the first element in strL that is
// greater than or equal to startQ, or -1 if every element is smaller (or the
// slice is empty). strL must be sorted in ascending order.
//
// The previous hand-rolled binary search (with copy-pasted, garbled
// comments) is replaced by sort.SearchStrings, which returns exactly the
// smallest index i with strL[i] >= startQ, handling duplicate runs by
// construction.
func findStartIndex(strL []string, startQ string) int {
	i := sort.SearchStrings(strL, startQ)
	if i == len(strL) {
		// All elements are < startQ, or the slice is empty.
		return -1
	}
	return i
}
// findEndIndex locates the end of the range for an exclusive bound endQ in
// the ascending-sorted strL: if endQ is present it returns the index of its
// first occurrence, otherwise the index of the last element smaller than
// endQ, or -1 when no such element exists (including the empty slice).
//
// The previous hand-rolled binary search carried comments copy-pasted from
// findStartIndex that described the wrong invariant; sort.SearchStrings
// gives the first index i with strL[i] >= endQ, from which both cases follow
// directly.
func findEndIndex(strL []string, endQ string) int {
	i := sort.SearchStrings(strL, endQ)
	if i < len(strL) && strL[i] == endQ {
		// endQ is present: i is its first occurrence.
		return i
	}
	// endQ is absent: strL[i-1] (if any) is the last element < endQ.
	return i - 1
}
// sortState tracks whether a batch handed to clearUnsortedCacheSubset is
// already sorted by key.
type sortState int

const (
	// stateUnsorted marks a batch that still needs sorting before insertion.
	stateUnsorted sortState = iota
	// stateAlreadySorted marks a batch whose keys are already ascending.
	stateAlreadySorted
)

// minSortSize is the unsortedCache size at which dirtyItems switches from a
// linear scan to sorting plus binary search.
const minSortSize = 1024
// Constructs a slice of dirty items, to use w/ memIterator. The selected
// entries are moved from unsortedCache into sortedCache so subsequent
// iterators observe them in key order.
func (store *Store) dirtyItems(start, end []byte) {
	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
	if end != nil && startStr > endStr {
		// Nothing to do here.
		return
	}

	n := len(store.unsortedCache)
	unsorted := make([]*kv.Pair, 0)
	// If the unsortedCache is too big, its costs too much to determine
	// whats in the subset we are concerned about.
	// If you are interleaving iterator calls with writes, this can easily become an
	// O(N^2) overhead.
	// Even without that, too many range checks eventually becomes more expensive
	// than just not having the cache.
	if n < minSortSize {
		for key := range store.unsortedCache {
			// dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start
			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
				cacheValue := store.cache[key]
				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
			}
		}
		store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
		return
	}

	// Otherwise it is large so perform a modified binary search to find
	// the target ranges for the keys that we should be looking for.
	strL := make([]string, 0, n)
	for key := range store.unsortedCache {
		strL = append(strL, key)
	}
	sort.Strings(strL)

	// Now find the values within the domain
	//  [start, end)
	startIndex := findStartIndex(strL, startStr)
	if startIndex < 0 {
		startIndex = 0
	}

	var endIndex int
	if end == nil {
		// Unbounded end: take everything through the last key.
		endIndex = len(strL) - 1
	} else {
		endIndex = findEndIndex(strL, endStr)
	}
	if endIndex < 0 {
		endIndex = len(strL) - 1
	}

	// Since we spent cycles to sort the values, we should process and remove a reasonable amount
	// ensure start to end is at least minSortSize in size
	// if below minSortSize, expand it to cover additional values
	// this amortizes the cost of processing elements across multiple calls
	if endIndex-startIndex < minSortSize {
		endIndex = math.Min(startIndex+minSortSize, len(strL)-1)
		if endIndex-startIndex < minSortSize {
			startIndex = math.Max(endIndex-minSortSize, 0)
		}
	}

	kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex)
	for i := startIndex; i <= endIndex; i++ {
		key := strL[i]
		cacheValue := store.cache[key]
		kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
	}

	// kvL was already sorted so pass it in as is.
	store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
}
// clearUnsortedCacheSubset removes the given entries from unsortedCache and
// inserts them into sortedCache, sorting the batch first when needed.
//
// state reports whether `unsorted` is already key-sorted, letting callers
// that sorted during range selection skip a redundant sort. (The parameter
// was previously named `sortState`, shadowing its own type name.)
func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, state sortState) {
	n := len(store.unsortedCache)
	if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map.
		for key := range store.unsortedCache {
			delete(store.unsortedCache, key)
		}
	} else { // Otherwise, normally delete the unsorted keys from the map.
		for _, kv := range unsorted {
			delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key))
		}
	}

	if state == stateUnsorted {
		sort.Slice(unsorted, func(i, j int) bool {
			return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0
		})
	}

	for _, item := range unsorted {
		// sortedCache is able to store `nil` value to represent deleted items.
		store.sortedCache.Set(item.Key, item.Value)
	}
}
//----------------------------------------
// etc

// setCacheValue is the only entrypoint to mutate store.cache.
// A `nil` value means a deletion. Dirty entries are additionally tracked in
// unsortedCache so iterators can later fold them into sortedCache.
func (store *Store) setCacheValue(key, value []byte, dirty bool) {
	keyStr := conv.UnsafeBytesToStr(key)
	entry := &cValue{value: value, dirty: dirty}
	store.cache[keyStr] = entry
	if dirty {
		store.unsortedCache[keyStr] = struct{}{}
	}
}

View File

@ -1,153 +0,0 @@
package cachekv_test
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
)
// sink is a package-level escape hatch assigned inside benchmark loops so the
// compiler cannot dead-code-eliminate the measured work.
var sink interface{}

// defaultValueSizeBz is the value payload size (4 KiB) used by benchmarks.
const defaultValueSizeBz = 1 << 12
// This benchmark measures the time of iterator.Next() when the parent store
// is blank, over b.N sequential keys of size keysize.
func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) {
	b.Helper()
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	kvstore := cachekv.NewStore(mem)
	// Use a singleton for value, to not waste time computing it
	value := randSlice(defaultValueSizeBz)
	// Use simple values for keys, pick a random start,
	// and take next b.N keys sequentially after.
	// Fix: honor the keysize parameter — the start key previously hard-coded
	// size 32, silently ignoring keysize.
	startKey := randSlice(keysize)

	// Add 1 to avoid issues when b.N = 1
	keys := generateSequentialKeys(startKey, b.N+1)
	for _, k := range keys {
		kvstore.Set(k, value)
	}

	b.ReportAllocs()
	b.ResetTimer()

	iter := kvstore.Iterator(keys[0], keys[b.N])
	defer iter.Close()

	for ; iter.Valid(); iter.Next() {
		_ = iter.Key()
		// deadcode elimination stub
		sink = iter
	}
}
// Benchmark setting New keys to a store, where the new keys are in sequence
// and have size keysize.
func benchmarkBlankParentAppend(b *testing.B, keysize int) {
	b.Helper()
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	kvstore := cachekv.NewStore(mem)

	// Use a singleton for value, to not waste time computing it
	value := randSlice(32)

	// Use simple values for keys, pick a random start,
	// and take next b.N keys sequentially after.
	// Fix: honor the keysize parameter — the start key previously hard-coded
	// size 32, silently ignoring keysize.
	startKey := randSlice(keysize)

	keys := generateSequentialKeys(startKey, b.N)

	b.ReportAllocs()
	b.ResetTimer()

	for _, k := range keys {
		kvstore.Set(k, value)
	}
}
// Benchmark setting New keys to a store, where the new keys are random.
// the speed of this function does not depend on the values in the parent store
func benchmarkRandomSet(b *testing.B, keysize int) {
b.Helper()
mem := dbadapter.Store{DB: dbm.NewMemDB()}
kvstore := cachekv.NewStore(mem)
// Use a singleton for value, to not waste time computing it
value := randSlice(defaultValueSizeBz)
// Add 1 to avoid issues when b.N = 1
keys := generateRandomKeys(keysize, b.N+1)
b.ReportAllocs()
b.ResetTimer()
for _, k := range keys {
kvstore.Set(k, value)
}
iter := kvstore.Iterator(keys[0], keys[b.N])
defer iter.Close()
for ; iter.Valid(); iter.Next() {
_ = iter.Key()
// deadcode elimination stub
sink = iter
}
}
// Benchmark creating an iterator on a parent with D entries,
// that are all deleted in the cacheKV store.
// We essentially are benchmarking the cacheKV iterator creation & iteration times
// with the number of entries deleted in the parent.
func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) {
	b.Helper()
	mem := dbadapter.Store{DB: dbm.NewMemDB()}

	// Use a singleton for value, to not waste time computing it
	value := randSlice(32)
	// Use simple values for keys, pick a random start,
	// and take next D keys sequentially after.
	startKey := randSlice(32)

	// Add 1 to avoid issues when numDeletes = 1
	keys := generateSequentialKeys(startKey, numDeletes+1)
	// setup parent db with D keys.
	for _, k := range keys {
		mem.Set(k, value)
	}
	kvstore := cachekv.NewStore(mem)

	// Delete all keys from the cache KV store.
	// The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design.
	// Essentially the iterator will never be valid, in that it should never run.
	// However, this is incompatible with the for loop structure the SDK uses, hence
	// causes a panic. Thus we do keys[1:].
	for _, k := range keys[1:] {
		kvstore.Delete(k)
	}

	b.ReportAllocs()
	b.ResetTimer()

	iter := kvstore.Iterator(keys[0], keys[numDeletes])
	defer iter.Close()

	for ; iter.Valid(); iter.Next() {
		_ = iter.Key()
		// deadcode elimination stub
		sink = iter
	}
}
// BenchmarkBlankParentIteratorNextKeySize32 benchmarks Next() over a blank
// parent with 32-byte keys.
func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) {
	benchmarkBlankParentIteratorNext(b, 32)
}

// BenchmarkBlankParentAppendKeySize32 benchmarks sequential Set()s with
// 32-byte keys on a blank parent.
func BenchmarkBlankParentAppendKeySize32(b *testing.B) {
	benchmarkBlankParentAppend(b, 32)
}

// BenchmarkSetKeySize32 benchmarks random-key Set()s with 32-byte keys.
func BenchmarkSetKeySize32(b *testing.B) {
	benchmarkRandomSet(b, 32)
}

// BenchmarkIteratorOnParentWith1MDeletes benchmarks iterating a store whose
// one million parent entries are all deleted in the cache layer.
func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) {
	benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000)
}

View File

@ -1,694 +0,0 @@
package cachekv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/math/unsafe"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/types"
)
// newCacheKVStore returns a cachekv store backed by a fresh in-memory DB.
func newCacheKVStore() types.CacheKVStore {
	return cachekv.NewStore(dbadapter.Store{DB: dbm.NewMemDB()})
}

// keyFmt and valFmt build fixed-width, zero-padded keys/values so that
// lexicographic byte order matches numeric order.
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
// TestCacheKVStore verifies read-through, write isolation, and that Write()
// flushes cached mutations (including deletes) into the parent store.
func TestCacheKVStore(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	st := cachekv.NewStore(mem)

	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")

	// put something in mem and in cache
	mem.Set(keyFmt(1), valFmt(1))
	st.Set(keyFmt(1), valFmt(1))
	require.Equal(t, valFmt(1), st.Get(keyFmt(1)))

	// update it in cache, shouldn't change mem
	st.Set(keyFmt(1), valFmt(2))
	require.Equal(t, valFmt(2), st.Get(keyFmt(1)))
	require.Equal(t, valFmt(1), mem.Get(keyFmt(1)))

	// write it. should change mem
	st.Write()
	require.Equal(t, valFmt(2), mem.Get(keyFmt(1)))
	require.Equal(t, valFmt(2), st.Get(keyFmt(1)))

	// more writes and checks
	st.Write()
	st.Write()
	require.Equal(t, valFmt(2), mem.Get(keyFmt(1)))
	require.Equal(t, valFmt(2), st.Get(keyFmt(1)))

	// make a new one, check it
	st = cachekv.NewStore(mem)
	require.Equal(t, valFmt(2), st.Get(keyFmt(1)))

	// make a new one and delete - should not be removed from mem
	st = cachekv.NewStore(mem)
	st.Delete(keyFmt(1))
	require.Empty(t, st.Get(keyFmt(1)))
	require.Equal(t, mem.Get(keyFmt(1)), valFmt(2))

	// Write. should now be removed from both
	st.Write()
	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
	require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty")
}
func TestCacheKVStoreNoNilSet(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
st := cachekv.NewStore(mem)
require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic")
require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
}
func TestCacheKVStoreNested(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
st := cachekv.NewStore(mem)
// set. check its there on st and not on mem.
st.Set(keyFmt(1), valFmt(1))
require.Empty(t, mem.Get(keyFmt(1)))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
// make a new from st and check
st2 := cachekv.NewStore(st)
require.Equal(t, valFmt(1), st2.Get(keyFmt(1)))
// update the value on st2, check it only effects st2
st2.Set(keyFmt(1), valFmt(3))
require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
require.Equal(t, valFmt(3), st2.Get(keyFmt(1)))
// st2 writes to its parent, st. doesnt effect mem
st2.Write()
require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(3), st.Get(keyFmt(1)))
// updates mem
st.Write()
require.Equal(t, valFmt(3), mem.Get(keyFmt(1)))
}
// TestCacheKVIteratorBounds checks forward iteration over nil, out-of-range,
// and partial [start, end) bounds, asserting keys come back in order.
func TestCacheKVIteratorBounds(t *testing.T) {
	st := newCacheKVStore()
	// set some items
	nItems := 5
	for i := 0; i < nItems; i++ {
		st.Set(keyFmt(i), valFmt(i))
	}
	// iterate over all of them
	itr := st.Iterator(nil, nil)
	i := 0
	for ; itr.Valid(); itr.Next() {
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(i), k)
		require.Equal(t, valFmt(i), v)
		i++
	}
	require.Equal(t, nItems, i)
	require.NoError(t, itr.Close())
	// iterate over none ("money" sorts after every "key…" entry)
	itr = st.Iterator(bz("money"), nil)
	i = 0
	for ; itr.Valid(); itr.Next() {
		i++
	}
	require.Equal(t, 0, i)
	require.NoError(t, itr.Close())
	// iterate over lower half; end bound is exclusive
	itr = st.Iterator(keyFmt(0), keyFmt(3))
	i = 0
	for ; itr.Valid(); itr.Next() {
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(i), k)
		require.Equal(t, valFmt(i), v)
		i++
	}
	require.Equal(t, 3, i)
	require.NoError(t, itr.Close())
	// iterate over upper half
	itr = st.Iterator(keyFmt(2), keyFmt(4))
	i = 2
	for ; itr.Valid(); itr.Next() {
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(i), k)
		require.Equal(t, valFmt(i), v)
		i++
	}
	require.Equal(t, 4, i)
	require.NoError(t, itr.Close())
}

// TestCacheKVReverseIteratorBounds mirrors TestCacheKVIteratorBounds for
// reverse iteration: keys must come back in descending order.
func TestCacheKVReverseIteratorBounds(t *testing.T) {
	st := newCacheKVStore()
	// set some items
	nItems := 5
	for i := 0; i < nItems; i++ {
		st.Set(keyFmt(i), valFmt(i))
	}
	// iterate over all of them, highest key first
	itr := st.ReverseIterator(nil, nil)
	i := 0
	for ; itr.Valid(); itr.Next() {
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(nItems-1-i), k)
		require.Equal(t, valFmt(nItems-1-i), v)
		i++
	}
	require.Equal(t, nItems, i)
	require.NoError(t, itr.Close())
	// iterate over none
	itr = st.ReverseIterator(bz("money"), nil)
	i = 0
	for ; itr.Valid(); itr.Next() {
		i++
	}
	require.Equal(t, 0, i)
	require.NoError(t, itr.Close())
	// iterate over lower half; exclusive end means first key is end-1
	end := 3
	itr = st.ReverseIterator(keyFmt(0), keyFmt(end))
	i = 0
	for ; itr.Valid(); itr.Next() {
		i++
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(end-i), k)
		require.Equal(t, valFmt(end-i), v)
	}
	require.Equal(t, 3, i)
	require.NoError(t, itr.Close())
	// iterate over upper half
	end = 4
	itr = st.ReverseIterator(keyFmt(2), keyFmt(end))
	i = 0
	for ; itr.Valid(); itr.Next() {
		i++
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(end-i), k)
		require.Equal(t, valFmt(end-i), v)
	}
	require.Equal(t, 2, i)
	require.NoError(t, itr.Close())
}
// TestCacheKVMergeIteratorBasics drives the merge iterator (cache entries
// merged over parent entries) through set/delete/write cycles, asserting the
// visible domain size after each step.
func TestCacheKVMergeIteratorBasics(t *testing.T) {
	st := newCacheKVStore()
	// set and delete an item in the cache, iterator should be empty
	k, v := keyFmt(0), valFmt(0)
	st.Set(k, v)
	st.Delete(k)
	assertIterateDomain(t, st, 0)
	// now set it and assert its there
	st.Set(k, v)
	assertIterateDomain(t, st, 1)
	// write it and assert its there
	st.Write()
	assertIterateDomain(t, st, 1)
	// remove it in cache and assert its not
	st.Delete(k)
	assertIterateDomain(t, st, 0)
	// write the delete and assert its not there
	st.Write()
	assertIterateDomain(t, st, 0)
	// add two keys and assert theyre there
	k1, v1 := keyFmt(1), valFmt(1)
	st.Set(k, v)
	st.Set(k1, v1)
	assertIterateDomain(t, st, 2)
	// write it and assert theyre there
	st.Write()
	assertIterateDomain(t, st, 2)
	// remove one in cache and assert its not
	st.Delete(k1)
	assertIterateDomain(t, st, 1)
	// write the delete and assert its not there
	st.Write()
	assertIterateDomain(t, st, 1)
	// delete the other key in cache and asserts its empty
	st.Delete(k)
	assertIterateDomain(t, st, 0)
}

// TestCacheKVMergeIteratorDeleteLast repeatedly deletes the highest key and
// checks the iterator domain shrinks by exactly one each time.
func TestCacheKVMergeIteratorDeleteLast(t *testing.T) {
	st := newCacheKVStore()
	// set some items and write them
	nItems := 5
	for i := 0; i < nItems; i++ {
		st.Set(keyFmt(i), valFmt(i))
	}
	st.Write()
	// set some more items and leave dirty
	for i := nItems; i < nItems*2; i++ {
		st.Set(keyFmt(i), valFmt(i))
	}
	// iterate over all of them
	assertIterateDomain(t, st, nItems*2)
	// delete them all, highest key first
	for i := 0; i < nItems*2; i++ {
		last := nItems*2 - 1 - i
		st.Delete(keyFmt(last))
		assertIterateDomain(t, st, last)
	}
}

// TestCacheKVMergeIteratorDeletes deletes alternating keys (even, then odd,
// after a reset) and compares the merge iterator against a ground-truth DB.
func TestCacheKVMergeIteratorDeletes(t *testing.T) {
	st := newCacheKVStore()
	truth := dbm.NewMemDB()
	// set some items and write them
	nItems := 10
	for i := 0; i < nItems; i++ {
		doOp(t, st, truth, opSet, i)
	}
	st.Write()
	// delete every other item, starting from 0
	for i := 0; i < nItems; i += 2 {
		doOp(t, st, truth, opDel, i)
		assertIterateDomainCompare(t, st, truth)
	}
	// reset
	st = newCacheKVStore()
	truth = dbm.NewMemDB()
	// set some items and write them
	for i := 0; i < nItems; i++ {
		doOp(t, st, truth, opSet, i)
	}
	st.Write()
	// delete every other item, starting from 1
	for i := 1; i < nItems; i += 2 {
		doOp(t, st, truth, opDel, i)
		assertIterateDomainCompare(t, st, truth)
	}
}
// TestCacheKVMergeIteratorChunks interleaves ranges of keys between the parent
// and the cache, then punches holes with range deletes, asserting the exact
// surviving key ranges after each mutation.
func TestCacheKVMergeIteratorChunks(t *testing.T) {
	st := newCacheKVStore()
	// Use the truth to check values on the merge iterator
	truth := dbm.NewMemDB()
	// sets to the parent
	setRange(t, st, truth, 0, 20)
	setRange(t, st, truth, 40, 60)
	st.Write()
	// sets to the cache
	setRange(t, st, truth, 20, 40)
	setRange(t, st, truth, 60, 80)
	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}})
	// remove some parents and some cache
	deleteRange(t, st, truth, 15, 25)
	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}})
	// remove some parents and some cache
	deleteRange(t, st, truth, 35, 45)
	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {45, 80}})
	// write, add more to the cache, and delete some cache
	st.Write()
	setRange(t, st, truth, 38, 42)
	deleteRange(t, st, truth, 40, 43)
	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}})
}
// TestCacheKVMergeIteratorDomain checks that Domain reports exactly the
// start/end bounds the iterator was created with (nil bounds compare equal).
//
// Fix: the original created the ReverseIterator inline and never closed it,
// leaking the iterator; it is now held in a variable and closed.
func TestCacheKVMergeIteratorDomain(t *testing.T) {
	st := newCacheKVStore()
	itr := st.Iterator(nil, nil)
	start, end := itr.Domain()
	require.Equal(t, start, end)
	require.NoError(t, itr.Close())
	itr = st.Iterator(keyFmt(40), keyFmt(60))
	start, end = itr.Domain()
	require.Equal(t, keyFmt(40), start)
	require.Equal(t, keyFmt(60), end)
	require.NoError(t, itr.Close())
	ritr := st.ReverseIterator(keyFmt(0), keyFmt(80))
	start, end = ritr.Domain()
	require.Equal(t, keyFmt(0), start)
	require.Equal(t, keyFmt(80), end)
	require.NoError(t, ritr.Close())
}
// TestCacheKVMergeIteratorRandom fuzzes the merge iterator: 2000 random
// set/delete/range/write operations, each followed by a full comparison
// against a ground-truth DB.
func TestCacheKVMergeIteratorRandom(t *testing.T) {
	st := newCacheKVStore()
	truth := dbm.NewMemDB()
	start, end := 25, 975
	max := 1000
	setRange(t, st, truth, start, end)
	// do an op, test the iterator
	for i := 0; i < 2000; i++ {
		doRandomOp(t, st, truth, max)
		assertIterateDomainCompare(t, st, truth)
	}
}

// TestNilEndIterator covers iteration with a nil end bound (scan to the end
// of the domain) both before and after flushing the cache with Write.
func TestNilEndIterator(t *testing.T) {
	const SIZE = 3000
	tests := []struct {
		name       string
		write      bool
		startIndex int
		end        []byte
	}{
		{name: "write=false, end=nil", write: false, end: nil, startIndex: 1000},
		{name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000},
		{name: "write=true, end=nil", write: true, end: nil, startIndex: 1000},
		{name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			st := newCacheKVStore()
			for i := 0; i < SIZE; i++ {
				kstr := keyFmt(i)
				st.Set(kstr, valFmt(i))
			}
			if tt.write {
				st.Write()
			}
			itr := st.Iterator(keyFmt(tt.startIndex), tt.end)
			i := tt.startIndex
			j := 0
			for itr.Valid() {
				require.Equal(t, keyFmt(i), itr.Key())
				require.Equal(t, valFmt(i), itr.Value())
				itr.Next()
				i++
				j++
			}
			require.Equal(t, SIZE-tt.startIndex, j)
			require.NoError(t, itr.Close())
		})
	}
}

// TestIteratorDeadlock demonstrate the deadlock issue in cache store.
func TestIteratorDeadlock(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	store := cachekv.NewStore(mem)
	// the channel buffer is 64 and received once, so put at least 66 elements.
	for i := 0; i < 66; i++ {
		store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1})
	}
	it := store.Iterator(nil, nil)
	defer it.Close()
	store.Set([]byte("key20"), []byte{1})
	// it'll be blocked here with previous version, or enable lock on btree.
	it2 := store.Iterator(nil, nil)
	defer it2.Close()
}
//-------------------------------------------------------------------------------------------
// do some random ops
// Operation codes used by doOp/doRandomOp to drive the cache store and the
// ground-truth DB in lockstep.
const (
	opSet      = 0
	opSetRange = 1
	opDel      = 2
	opDelRange = 3
	opWrite    = 4
	totalOps   = 5 // number of possible operations
)

// randInt returns a non-negative pseudo-random int in [0, n).
func randInt(n int) int {
	return unsafe.NewRand().Int() % n
}
// useful for replaying an error case if we find one

// doOp applies a single scripted operation to both the cache store and the
// ground-truth DB so that later comparisons can detect divergence.
func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) {
	t.Helper()
	switch op {
	case opSet:
		k := args[0]
		st.Set(keyFmt(k), valFmt(k))
		err := truth.Set(keyFmt(k), valFmt(k))
		require.NoError(t, err)
	case opSetRange:
		start := args[0]
		end := args[1]
		setRange(t, st, truth, start, end)
	case opDel:
		k := args[0]
		st.Delete(keyFmt(k))
		err := truth.Delete(keyFmt(k))
		require.NoError(t, err)
	case opDelRange:
		start := args[0]
		end := args[1]
		deleteRange(t, st, truth, start, end)
	case opWrite:
		st.Write()
	}
}

// doRandomOp applies one randomly chosen operation (set, set-range, delete,
// delete-range, or write) to both the cache store and the ground-truth DB.
func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) {
	t.Helper()
	r := randInt(totalOps)
	switch r {
	case opSet:
		k := randInt(maxKey)
		st.Set(keyFmt(k), valFmt(k))
		err := truth.Set(keyFmt(k), valFmt(k))
		require.NoError(t, err)
	case opSetRange:
		start := randInt(maxKey - 2)
		end := randInt(maxKey-start) + start
		setRange(t, st, truth, start, end)
	case opDel:
		k := randInt(maxKey)
		st.Delete(keyFmt(k))
		err := truth.Delete(keyFmt(k))
		require.NoError(t, err)
	case opDelRange:
		start := randInt(maxKey - 2)
		end := randInt(maxKey-start) + start
		deleteRange(t, st, truth, start, end)
	case opWrite:
		st.Write()
	}
}
//-------------------------------------------------------------------------------------------
// iterate over whole domain

// assertIterateDomain iterates st over its full domain and asserts it yields
// exactly expectedN consecutive keyFmt/valFmt pairs starting at 0.
func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) {
	t.Helper()
	itr := st.Iterator(nil, nil)
	i := 0
	for ; itr.Valid(); itr.Next() {
		k, v := itr.Key(), itr.Value()
		require.Equal(t, keyFmt(i), k)
		require.Equal(t, valFmt(i), v)
		i++
	}
	require.Equal(t, expectedN, i)
	require.NoError(t, itr.Close())
}

// assertIterateDomainCheck walks st and the ground-truth DB in lockstep and
// additionally checks the visited keys match the expected keyRange list r.
func assertIterateDomainCheck(t *testing.T, st types.KVStore, mem dbm.DB, r []keyRange) {
	t.Helper()
	// iterate over each and check they match the other
	itr := st.Iterator(nil, nil)
	itr2, err := mem.Iterator(nil, nil) // ground truth
	require.NoError(t, err)
	krc := newKeyRangeCounter(r)
	i := 0
	for ; krc.valid(); krc.next() {
		require.True(t, itr.Valid())
		require.True(t, itr2.Valid())
		// check the key/val matches the ground truth
		k, v := itr.Key(), itr.Value()
		k2, v2 := itr2.Key(), itr2.Value()
		require.Equal(t, k, k2)
		require.Equal(t, v, v2)
		// check they match the counter
		require.Equal(t, k, keyFmt(krc.key()))
		itr.Next()
		itr2.Next()
		i++
	}
	// both iterators must be exhausted along with the counter
	require.False(t, itr.Valid())
	require.False(t, itr2.Valid())
	require.NoError(t, itr.Close())
	require.NoError(t, itr2.Close())
}

// assertIterateDomainCompare asserts st and the ground-truth DB contain
// exactly the same key/value sequence (checked in both directions).
func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) {
	t.Helper()
	// iterate over each and check they match the other
	itr := st.Iterator(nil, nil)
	itr2, err := mem.Iterator(nil, nil) // ground truth
	require.NoError(t, err)
	checkIterators(t, itr, itr2)
	checkIterators(t, itr2, itr)
	require.NoError(t, itr.Close())
	require.NoError(t, itr2.Close())
}

// checkIterators asserts itr2 yields the same key/value pairs as itr and that
// both finish together.
func checkIterators(t *testing.T, itr, itr2 types.Iterator) {
	t.Helper()
	for ; itr.Valid(); itr.Next() {
		require.True(t, itr2.Valid())
		k, v := itr.Key(), itr.Value()
		k2, v2 := itr2.Key(), itr2.Value()
		require.Equal(t, k, k2)
		require.Equal(t, v, v2)
		itr2.Next()
	}
	require.False(t, itr.Valid())
	require.False(t, itr2.Valid())
}
//--------------------------------------------------------
// setRange sets keys [start, end) in both the store under test and the
// ground-truth DB.
func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) {
	t.Helper()
	for i := start; i < end; i++ {
		st.Set(keyFmt(i), valFmt(i))
		err := mem.Set(keyFmt(i), valFmt(i))
		require.NoError(t, err)
	}
}

// deleteRange deletes keys [start, end) from both the store under test and
// the ground-truth DB.
func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) {
	t.Helper()
	for i := start; i < end; i++ {
		st.Delete(keyFmt(i))
		err := mem.Delete(keyFmt(i))
		require.NoError(t, err)
	}
}
//--------------------------------------------------------
// keyRange is a half-open interval [start, end) of integer test keys.
type keyRange struct {
	start int
	end   int
}

// len returns the number of keys in the range.
func (kr keyRange) len() int {
	return kr.end - kr.start
}

// newKeyRangeCounter returns a counter positioned at the first key of the
// first range.
func newKeyRangeCounter(kr []keyRange) *keyRangeCounter {
	return &keyRangeCounter{keyRanges: kr}
}

// we can iterate over this and make sure our real iterators have all the right keys
type keyRangeCounter struct {
	rangeIdx  int
	idx       int
	keyRanges []keyRange
}

// valid reports whether the counter still points at a key.
//
// Fix: the original compared idx against the length of the LAST range for
// every range, which wrongly cut a range short whenever an earlier range was
// longer than the final one; it also panicked on an empty range list. The
// current range's length is the correct bound.
func (krc *keyRangeCounter) valid() bool {
	if krc.rangeIdx >= len(krc.keyRanges) {
		return false
	}
	return krc.idx < krc.keyRanges[krc.rangeIdx].len()
}

// next advances to the following key, rolling over into the next range when
// the current one is exhausted.
func (krc *keyRangeCounter) next() {
	thisKeyRange := krc.keyRanges[krc.rangeIdx]
	if krc.idx == thisKeyRange.len()-1 {
		krc.rangeIdx++
		krc.idx = 0
	} else {
		krc.idx++
	}
}

// key returns the integer key currently pointed at.
func (krc *keyRangeCounter) key() int {
	thisKeyRange := krc.keyRanges[krc.rangeIdx]
	return thisKeyRange.start + krc.idx
}
//--------------------------------------------------------
func bz(s string) []byte { return []byte(s) }
// BenchmarkCacheKVStoreGetNoKeyFound measures Get misses on an empty store.
func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) {
	b.ReportAllocs()
	st := newCacheKVStore()
	b.ResetTimer()
	// assumes b.N < 2**24 (keys are encoded in 3 bytes)
	for i := 0; i < b.N; i++ {
		st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)})
	}
}

// BenchmarkCacheKVStoreGetKeyFound measures Get hits: every key is set
// before the timer starts, so each lookup succeeds.
func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) {
	b.ReportAllocs()
	st := newCacheKVStore()
	for i := 0; i < b.N; i++ {
		arr := []byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}
		st.Set(arr, arr)
	}
	b.ResetTimer()
	// assumes b.N < 2**24
	for i := 0; i < b.N; i++ {
		st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)})
	}
}

View File

@ -1,170 +0,0 @@
package cachemulti
import (
"fmt"
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// storeNameCtxKey is the TraceContext metadata key that identifies
// the store which emitted a given trace.
const storeNameCtxKey = "store_name"

//----------------------------------------
// Store

// Store holds many branched stores.
// Implements MultiStore.
// NOTE: a Store (and MultiStores in general) should never expose the
// keys for the substores.
type Store struct {
	db           types.CacheKVStore                 // branched wrapper over the backing KVStore
	stores       map[types.StoreKey]types.CacheWrap // branched substores, keyed by store key
	keys         map[string]types.StoreKey          // name -> store key lookup (may be nil for nested stores)
	traceWriter  io.Writer                          // sink for trace output; nil disables tracing
	traceContext types.TraceContext                 // metadata attached to every traced operation
}

var _ types.CacheMultiStore = Store{}
// NewFromKVStore creates a new Store object from a mapping of store keys to
// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store
// is a branched store.
func NewFromKVStore(
	store types.KVStore, stores map[types.StoreKey]types.CacheWrapper,
	keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext,
) Store {
	cms := Store{
		db:           cachekv.NewStore(store),
		stores:       make(map[types.StoreKey]types.CacheWrap, len(stores)),
		keys:         keys,
		traceWriter:  traceWriter,
		traceContext: traceContext,
	}
	for key, store := range stores {
		// When tracing, interpose a tracekv layer tagged with the store name
		// before branching, so every operation on the branch is traced.
		if cms.TracingEnabled() {
			tctx := cms.traceContext.Clone().Merge(types.TraceContext{
				storeNameCtxKey: key.Name(),
			})
			store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx)
		}
		cms.stores[key] = cachekv.NewStore(store.(types.KVStore))
	}
	return cms
}

// NewStore creates a new Store object from a mapping of store keys to
// CacheWrapper objects. Each CacheWrapper store is a branched store.
func NewStore(
	db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey,
	traceWriter io.Writer, traceContext types.TraceContext,
) Store {
	return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext)
}
// newCacheMultiStoreFromCMS branches an already-branched multi-store,
// wrapping each of its substores in a fresh cache layer.
//
// The keys map is intentionally nil: nested branches do not need name lookup.
func newCacheMultiStoreFromCMS(cms Store) Store {
	// Pre-size the map to avoid rehashing while copying.
	stores := make(map[types.StoreKey]types.CacheWrapper, len(cms.stores))
	for k, v := range cms.stores {
		stores[k] = v
	}
	return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext)
}
// SetTracer sets the tracer for the MultiStore that the underlying
// stores will utilize to trace operations. A MultiStore is returned.
func (cms Store) SetTracer(w io.Writer) types.MultiStore {
	cms.traceWriter = w
	return cms
}

// SetTracingContext updates the tracing context for the MultiStore by merging
// the given context with the existing context by key. Any existing keys will
// be overwritten. It is implied that the caller should update the context when
// necessary between tracing operations. It returns a modified MultiStore.
func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore {
	// NOTE: when traceContext is non-nil this mutates the shared map in place;
	// when it is nil the new map is only visible on the returned copy.
	if cms.traceContext != nil {
		for k, v := range tc {
			cms.traceContext[k] = v
		}
	} else {
		cms.traceContext = tc
	}
	return cms
}

// TracingEnabled returns if tracing is enabled for the MultiStore.
func (cms Store) TracingEnabled() bool {
	return cms.traceWriter != nil
}

// LatestVersion returns the branch version of the store
func (cms Store) LatestVersion() int64 {
	panic("cannot get latest version from branch cached multi-store")
}

// GetStoreType returns the type of the store.
func (cms Store) GetStoreType() types.StoreType {
	return types.StoreTypeMulti
}
// Write calls Write on each underlying store.
func (cms Store) Write() {
	cms.db.Write()
	for _, store := range cms.stores {
		store.Write()
	}
}

// CacheWrap implements CacheWrapper by branching the whole multi-store.
func (cms Store) CacheWrap() types.CacheWrap {
	return cms.CacheMultiStore().(types.CacheWrap)
}

// CacheWrapWithTrace implements the CacheWrapper interface.
// Tracing arguments are ignored: the branch inherits this store's tracer.
func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
	return cms.CacheWrap()
}

// CacheMultiStore implements MultiStore by branching every substore.
func (cms Store) CacheMultiStore() types.CacheMultiStore {
	return newCacheMultiStoreFromCMS(cms)
}

// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic
// as an already cached multi-store cannot load previous versions.
//
// TODO: The store implementation can possibly be modified to support this as it
// seems safe to load previous versions (heights).
func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) {
	panic("cannot branch cached multi-store with a version")
}
// GetStore returns an underlying Store by key.
// Panics when the key is nil or was never registered.
func (cms Store) GetStore(key types.StoreKey) types.Store {
	s := cms.stores[key]
	if key == nil || s == nil {
		panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
	}
	return s.(types.Store)
}

// GetKVStore returns an underlying KVStore by key.
// Panics when the key is nil or was never registered.
func (cms Store) GetKVStore(key types.StoreKey) types.KVStore {
	store := cms.stores[key]
	if key == nil || store == nil {
		panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
	}
	return store.(types.KVStore)
}

View File

@ -1,24 +0,0 @@
package cachemulti
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/types"
)
// TestStoreGetKVStore verifies that looking up an unregistered store key
// panics with the expected message for both GetStore and GetKVStore.
func TestStoreGetKVStore(t *testing.T) {
	require := require.New(t)
	s := Store{stores: map[types.StoreKey]types.CacheWrap{}}
	key := types.NewKVStoreKey("abc")
	errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key)
	require.PanicsWithValue(errMsg,
		func() { s.GetStore(key) })
	require.PanicsWithValue(errMsg,
		func() { s.GetKVStore(key) })
}

39
store/changeset.go Normal file
View File

@ -0,0 +1,39 @@
package store
// KVPair defines a key-value pair with additional metadata that is used to
// track writes. A nil Value denotes a deletion of Key.
type KVPair struct {
	Key      []byte
	Value    []byte
	StoreKey string // optional
}

// Changeset defines an ordered set of KVPair entries.
type Changeset struct {
	Pairs []KVPair
}

// NewChangeset constructs a Changeset pre-populated with the given pairs.
func NewChangeset(pairs ...KVPair) *Changeset {
	cs := new(Changeset)
	cs.Pairs = pairs
	return cs
}

// Size reports how many key-value pairs the changeset holds.
func (cs *Changeset) Size() int {
	return len(cs.Pairs)
}

// Add appends a write of value under key to the changeset.
func (cs *Changeset) Add(key, value []byte) {
	pair := KVPair{Key: key, Value: value}
	cs.Pairs = append(cs.Pairs, pair)
}

// AddKVPair appends an already-constructed KVPair to the changeset.
func (cs *Changeset) AddKVPair(pair KVPair) {
	cs.Pairs = append(cs.Pairs, pair)
}

82
store/commit_info.go Normal file
View File

@ -0,0 +1,82 @@
package store
import (
"fmt"
"time"
"cosmossdk.io/store/v2/internal/maps"
)
type (
	// CommitHeader defines the interface for a block header that can be provided
	// to a MultiStore upon Commit. This should be optional and used to facilitate
	// time-based queries only.
	CommitHeader interface {
		GetTime() time.Time
		GetHeight() uint64
	}

	// CommitInfo defines commit information used by the multi-store when committing
	// a version/height.
	CommitInfo struct {
		Version    uint64
		StoreInfos []StoreInfo
		Timestamp  time.Time
	}

	// StoreInfo defines store-specific commit information. It contains a reference
	// between a store name/key and the commit ID.
	StoreInfo struct {
		Name     string
		CommitID CommitID
	}

	// CommitID defines the commitment information when a specific store is
	// committed.
	CommitID struct {
		Version uint64
		Hash    []byte
	}
)

// GetHash returns the hash recorded in the store's commit ID.
func (si StoreInfo) GetHash() []byte {
	return si.CommitID.Hash
}
// Hash returns the root hash of all committed stores represented by CommitInfo,
// sorted by store name/key. Returns nil when there are no store infos.
func (ci CommitInfo) Hash() []byte {
	if len(ci.StoreInfos) == 0 {
		return nil
	}
	rootHash, _, _ := maps.ProofsFromMap(ci.toMap())
	return rootHash
}

// toMap flattens the store infos into a name -> hash map for proof building.
func (ci CommitInfo) toMap() map[string][]byte {
	m := make(map[string][]byte, len(ci.StoreInfos))
	for _, storeInfo := range ci.StoreInfos {
		m[storeInfo.Name] = storeInfo.GetHash()
	}
	return m
}

// CommitID returns the commit identifier (version + root hash) for this commit.
func (ci CommitInfo) CommitID() CommitID {
	return CommitID{
		Version: ci.Version,
		Hash:    ci.Hash(),
	}
}
// GetVersion returns the commit version, or 0 when the receiver is nil.
// The nil guard keeps the getter safe to call on an absent CommitInfo.
//
// Receiver renamed from the protobuf-style `m` to `ci` for consistency with
// the other CommitInfo methods.
func (ci *CommitInfo) GetVersion() uint64 {
	if ci != nil {
		return ci.Version
	}
	return 0
}
// String implements fmt.Stringer.
//
// NOTE(review): the hash is rendered with %v and the version with %X, i.e.
// "CommitID{<hash byte list>:<hex version>}" — confirm this argument order
// is intended rather than the reverse.
func (cid CommitID) String() string {
	return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version)
}

View File

@ -0,0 +1,3 @@
# State Commitment (SC)
TODO

80
store/commitment/db.go Normal file
View File

@ -0,0 +1,80 @@
package commitment
import (
"sync"
ics23 "github.com/cosmos/ics23/go"
"cosmossdk.io/store/v2"
)
// Database represents a state commitment store. It is designed to securely store
// and manage the most recent state information, crucial for achieving consensus.
// Each module creates its own instance of Database for managing its specific state.
type Database struct {
	mu   sync.Mutex // serializes all access to the underlying tree
	tree store.Tree
}

// NewDatabase creates a new Database instance wrapping the given tree.
func NewDatabase(tree store.Tree) *Database {
	return &Database{
		tree: tree,
	}
}
// WriteBatch writes a batch of key-value pairs to the database.
// All methods take the mutex so tree access is serialized.
func (db *Database) WriteBatch(cs *store.Changeset) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.WriteBatch(cs)
}

// WorkingHash returns the working hash of the database.
func (db *Database) WorkingHash() []byte {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.WorkingHash()
}

// LoadVersion loads the state at the given version.
func (db *Database) LoadVersion(version uint64) error {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.LoadVersion(version)
}

// Commit commits the current state to the database.
func (db *Database) Commit() ([]byte, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.Commit()
}

// GetProof returns a proof for the given key and version.
func (db *Database) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.GetProof(version, key)
}

// GetLatestVersion returns the latest version of the database.
func (db *Database) GetLatestVersion() uint64 {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.GetLatestVersion()
}

// Close closes the database and releases all resources.
func (db *Database) Close() error {
	db.mu.Lock()
	defer db.mu.Unlock()
	return db.tree.Close()
}

View File

@ -0,0 +1,84 @@
package commitment
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/commitment/iavl"
)
// generateTree builds a fresh commitment tree of the requested kind,
// returning nil for unsupported tree types.
func generateTree(treeType string) store.Tree {
	switch treeType {
	case "iavl":
		return iavl.NewIavlTree(dbm.NewMemDB(), log.NewNopLogger(), iavl.DefaultConfig())
	default:
		return nil
	}
}
// TestIavlTree drives the IAVL-backed tree through two commit cycles,
// verifying working-hash/commit-hash agreement, version numbering,
// existence/non-existence proofs, and rollback via LoadVersion.
func TestIavlTree(t *testing.T) {
	// generate a new tree
	tree := generateTree("iavl")
	require.NotNil(t, tree)
	initVersion := tree.GetLatestVersion()
	require.Equal(t, uint64(0), initVersion)
	// write a batch of version 1
	cs1 := store.NewChangeset()
	cs1.Add([]byte("key1"), []byte("value1"))
	cs1.Add([]byte("key2"), []byte("value2"))
	cs1.Add([]byte("key3"), []byte("value3"))
	err := tree.WriteBatch(cs1)
	require.NoError(t, err)
	// WorkingHash reflects pending writes; version only bumps on Commit
	workingHash := tree.WorkingHash()
	require.NotNil(t, workingHash)
	require.Equal(t, uint64(0), tree.GetLatestVersion())
	// commit the batch
	commitHash, err := tree.Commit()
	require.NoError(t, err)
	require.Equal(t, workingHash, commitHash)
	require.Equal(t, uint64(1), tree.GetLatestVersion())
	version1Hash := tree.WorkingHash()
	// write a batch of version 2
	cs2 := store.NewChangeset()
	cs2.Add([]byte("key4"), []byte("value4"))
	cs2.Add([]byte("key5"), []byte("value5"))
	cs2.Add([]byte("key6"), []byte("value6"))
	cs2.Add([]byte("key1"), nil) // delete key1
	err = tree.WriteBatch(cs2)
	require.NoError(t, err)
	workingHash = tree.WorkingHash()
	require.NotNil(t, workingHash)
	commitHash, err = tree.Commit()
	require.NoError(t, err)
	require.Equal(t, workingHash, commitHash)
	// get proof for key1: exists at version 1, deleted at version 2
	proof, err := tree.GetProof(1, []byte("key1"))
	require.NoError(t, err)
	require.NotNil(t, proof.GetExist())
	proof, err = tree.GetProof(2, []byte("key1"))
	require.NoError(t, err)
	require.NotNil(t, proof.GetNonexist())
	// load version 1: state must match the hash captured after commit 1
	err = tree.LoadVersion(1)
	require.NoError(t, err)
	require.Equal(t, version1Hash, tree.WorkingHash())
	// close the db
	require.NoError(t, tree.Close())
}

View File

@ -0,0 +1,15 @@
package iavl
// Config is the configuration for the IAVL tree.
type Config struct {
	CacheSize              int  `mapstructure:"cache_size"`                // number of nodes kept in the in-memory cache
	SkipFastStorageUpgrade bool `mapstructure:"skip_fast_storage_upgrade"` // when true, do not migrate to the fast node layout
}

// DefaultConfig returns the default configuration for the IAVL tree.
func DefaultConfig() *Config {
	return &Config{
		CacheSize:              1000,
		SkipFastStorageUpgrade: false,
	}
}

View File

@ -0,0 +1,84 @@
package iavl
import (
"fmt"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
ics23 "github.com/cosmos/ics23/go"
log "cosmossdk.io/log"
"cosmossdk.io/store/v2"
)
var _ store.Tree = (*IavlTree)(nil)

// IavlTree is a wrapper around iavl.MutableTree.
type IavlTree struct {
	tree *iavl.MutableTree
}

// NewIavlTree creates a new IavlTree instance backed by db.
func NewIavlTree(db dbm.DB, logger log.Logger, cfg *Config) *IavlTree {
	tree := iavl.NewMutableTree(db, cfg.CacheSize, cfg.SkipFastStorageUpgrade, logger)
	return &IavlTree{
		tree: tree,
	}
}
// WriteBatch writes a batch of key-value pairs to the database.
// A pair with a nil Value is treated as a deletion; deleting a key that does
// not exist is an error.
func (t *IavlTree) WriteBatch(cs *store.Changeset) error {
	for _, kv := range cs.Pairs {
		if kv.Value == nil {
			_, res, err := t.tree.Remove(kv.Key)
			if err != nil {
				return err
			}
			// res is false when the key was not present in the tree
			if !res {
				return fmt.Errorf("failed to delete key %X", kv.Key)
			}
		} else {
			_, err := t.tree.Set(kv.Key, kv.Value)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// WorkingHash returns the working hash of the database, including any
// uncommitted writes.
func (t *IavlTree) WorkingHash() []byte {
	return t.tree.WorkingHash()
}

// LoadVersion loads the state at the given version.
// NOTE(review): LoadVersionForOverwriting rolls the tree back to `version`;
// confirm that discarding any later versions is the intended semantics here.
func (t *IavlTree) LoadVersion(version uint64) error {
	return t.tree.LoadVersionForOverwriting(int64(version))
}

// Commit commits the current state to the database, returning the new root
// hash.
func (t *IavlTree) Commit() ([]byte, error) {
	hash, _, err := t.tree.SaveVersion()
	return hash, err
}
// GetProof returns an ICS23 commitment proof (existence or non-existence)
// for key at the given committed version.
//
// Local variable renamed from the misspelled `imutableTree`.
func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) {
	immutableTree, err := t.tree.GetImmutable(int64(version))
	if err != nil {
		return nil, err
	}
	return immutableTree.GetProof(key)
}
// GetLatestVersion returns the latest committed version of the database.
func (t *IavlTree) GetLatestVersion() uint64 {
	return uint64(t.tree.Version())
}

// Close closes the iavl tree. The underlying tree holds no resources that
// need releasing here, so this is a no-op.
func (t *IavlTree) Close() error {
	return nil
}

69
store/database.go Normal file
View File

@ -0,0 +1,69 @@
package store
import (
"io"
)
// Reader wraps the Has and Get method of a backing data store.
type Reader interface {
	// Has retrieves if a key is present in the key-value data store.
	//
	// Note: <key> is safe to modify and read after calling Has.
	Has(storeKey string, key []byte) (bool, error)

	// Get retrieves the given key if it's present in the key-value data store.
	//
	// Note: <key> is safe to modify and read after calling Get.
	// The returned byte slice is safe to read, but cannot be modified.
	Get(storeKey string, key []byte) ([]byte, error)
}

// Writer wraps the Set method of a backing data store.
type Writer interface {
	// Set inserts the given value into the key-value data store.
	//
	// Note: <key, value> are safe to modify and read after calling Set.
	Set(storeKey string, key, value []byte) error

	// Delete removes the key from the backing key-value data store.
	//
	// Note: <key> is safe to modify and read after calling Delete.
	Delete(storeKey string, key []byte) error
}

// Database contains all the methods required to allow handling different
// key-value data stores backing the database.
type Database interface {
	Reader
	Writer
	IteratorCreator
	io.Closer
}

// VersionedDatabase defines an API for a versioned database that allows reads,
// writes, iteration and commitment over a series of versions.
type VersionedDatabase interface {
	// Has reports whether key exists in the named store at the given version.
	Has(storeKey string, version uint64, key []byte) (bool, error)
	// Get returns the value of key in the named store at the given version.
	Get(storeKey string, version uint64, key []byte) ([]byte, error)
	// GetLatestVersion returns the most recently committed version.
	GetLatestVersion() (uint64, error)
	// SetLatestVersion records version as the latest committed version.
	SetLatestVersion(version uint64) error
	// Iterator returns a forward iterator over the named store at a version.
	Iterator(storeKey string, version uint64, start, end []byte) (Iterator, error)
	// ReverseIterator returns a reverse iterator over the named store at a version.
	ReverseIterator(storeKey string, version uint64, start, end []byte) (Iterator, error)
	// ApplyChangeset writes all pairs of cs at the given version.
	ApplyChangeset(version uint64, cs *Changeset) error

	// Prune attempts to prune all versions up to and including the provided
	// version argument. The operation should be idempotent. An error should be
	// returned upon failure.
	Prune(version uint64) error

	// Close releases associated resources. It should NOT be idempotent. It must
	// only be called once and any call after may panic.
	io.Closer
}

// Committer defines a contract for committing state.
type Committer interface {
	Commit() error
}

View File

@ -1,90 +0,0 @@
package dbadapter
import (
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// Store is a wrapper type for dbm.DB with an implementation of KVStore:
// error-returning DB methods are adapted to the panic-on-error KVStore API.
type Store struct {
	dbm.DB
}
// Get wraps the underlying DB's Get method, panicking on error.
func (dsa Store) Get(key []byte) []byte {
	v, err := dsa.DB.Get(key)
	if err != nil {
		panic(err)
	}
	return v
}

// Has wraps the underlying DB's Has method, panicking on error.
func (dsa Store) Has(key []byte) bool {
	ok, err := dsa.DB.Has(key)
	if err != nil {
		panic(err)
	}
	return ok
}

// Set wraps the underlying DB's Set method, panicking on error.
// The key must be non-empty and the value non-nil.
func (dsa Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)
	if err := dsa.DB.Set(key, value); err != nil {
		panic(err)
	}
}

// Delete wraps the underlying DB's Delete method, panicking on error.
func (dsa Store) Delete(key []byte) {
	if err := dsa.DB.Delete(key); err != nil {
		panic(err)
	}
}
// Iterator wraps the underlying DB's Iterator method, panicking on error.
func (dsa Store) Iterator(start, end []byte) types.Iterator {
	it, err := dsa.DB.Iterator(start, end)
	if err != nil {
		panic(err)
	}
	return it
}
// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking
// on error.
func (dsa Store) ReverseIterator(start, end []byte) types.Iterator {
	it, err := dsa.DB.ReverseIterator(start, end)
	if err != nil {
		panic(err)
	}
	return it
}
// GetStoreType returns the type of the store; always types.StoreTypeDB.
func (Store) GetStoreType() types.StoreType {
	return types.StoreTypeDB
}
// CacheWrap branches the underlying store, returning a new cachekv store
// backed by this adapter.
func (dsa Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(dsa)
}
// CacheWrapWithTrace implements KVStore. It branches the underlying store,
// tracing every operation to w with trace context tc.
func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(dsa, w, tc))
}
// Compile-time assertion that Store satisfies types.KVStore, so any dbm.DB
// can be wrapped and cache-wrapped as a KVStore.
var _ types.KVStore = Store{}

View File

@ -1,86 +0,0 @@
package dbadapter_test
import (
"bytes"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/mock"
"cosmossdk.io/store/types"
)
var errFoo = errors.New("dummy")
// TestAccessors exercises every KVStore accessor of dbadapter.Store against a
// mocked dbm.DB, checking both the success paths and that any error from the
// underlying DB surfaces as a panic.
func TestAccessors(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockDB := mock.NewMockDB(mockCtrl)
	store := dbadapter.Store{mockDB}
	key := []byte("test")
	value := []byte("testvalue")

	// Invalid keys must be rejected before the underlying DB is ever called.
	require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
	require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")

	require.Equal(t, types.StoreTypeDB, store.GetStoreType())
	store.GetStoreType()

	// Get: returns the DB value on success, panics on DB error.
	retFoo := []byte("xxx")
	mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil)
	require.True(t, bytes.Equal(retFoo, store.Get(key)))
	mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.Get(key) })

	// Has: forwards the boolean on success, panics on DB error.
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil)
	require.True(t, store.Has(key))
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil)
	require.False(t, store.Has(key))
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo)
	require.Panics(t, func() { store.Has(key) })

	// Set / Delete: no panic on success, panic on DB error.
	mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil)
	require.NotPanics(t, func() { store.Set(key, value) })
	mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo)
	require.Panics(t, func() { store.Set(key, value) })
	mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil)
	require.NotPanics(t, func() { store.Delete(key) })
	mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo)
	require.Panics(t, func() { store.Delete(key) })

	// Iterators: panic only when the DB returns an error.
	start, end := []byte("start"), []byte("end")
	mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil)
	require.NotPanics(t, func() { store.Iterator(start, end) })
	mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.Iterator(start, end) })
	mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil)
	require.NotPanics(t, func() { store.ReverseIterator(start, end) })
	mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.ReverseIterator(start, end) })
}
// TestCacheWraps verifies that both cache-wrap flavors of the adapter produce
// cachekv stores.
func TestCacheWraps(t *testing.T) {
	ctrl := gomock.NewController(t)
	adapter := dbadapter.Store{mock.NewMockDB(ctrl)}

	require.IsType(t, &cachekv.Store{}, adapter.CacheWrap())
	require.IsType(t, &cachekv.Store{}, adapter.CacheWrapWithTrace(nil, nil))
}

View File

@ -1,14 +1,16 @@
package types
package store
import (
"cosmossdk.io/errors"
)
// StoreCodespace defines the store package's unique error code space.
const StoreCodespace = "store"
var (
// ErrInvalidProof is returned when a proof is invalid
ErrInvalidProof = errors.Register(StoreCodespace, 2, "invalid proof")
// ErrTxDecode is returned if we cannot parse a transaction
ErrTxDecode = errors.Register(StoreCodespace, 3, "tx parse error")
@ -22,7 +24,15 @@ var (
// ErrConflict defines a conflict error, e.g. when two goroutines try to access
// the same resource and one of them fails.
ErrConflict = errors.Register(StoreCodespace, 6, "conflict")
// ErrInvalidRequest defines an ABCI typed error where the request contains
// invalid data.
ErrInvalidRequest = errors.Register(StoreCodespace, 7, "invalid request")
ErrClosed = errors.Register(StoreCodespace, 8, "closed")
ErrRecordNotFound = errors.Register(StoreCodespace, 9, "record not found")
ErrUnknownStoreKey = errors.Register(StoreCodespace, 10, "unknown store key")
ErrInvalidVersion = errors.Register(StoreCodespace, 11, "invalid version")
ErrKeyEmpty = errors.Register(StoreCodespace, 12, "key empty")
ErrStartAfterEnd = errors.Register(StoreCodespace, 13, "start key after end key")
)

View File

@ -1,176 +0,0 @@
package gaskv
import (
"io"
"cosmossdk.io/store/types"
)
// Compile-time assertion that *Store satisfies the KVStore interface.
var _ types.KVStore = &Store{}

// Store applies gas tracking to an underlying KVStore. It implements the
// KVStore interface.
type Store struct {
	gasMeter  types.GasMeter  // meter charged for every operation
	gasConfig types.GasConfig // per-operation gas costs
	parent    types.KVStore   // underlying store being metered
}
// NewStore returns a reference to a new GasKVStore that wraps parent and
// charges operations against gasMeter using the costs in gasConfig.
func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store {
	return &Store{
		gasMeter:  gasMeter,
		gasConfig: gasConfig,
		parent:    parent,
	}
}
// GetStoreType implements Store; it reports the parent store's type.
func (gs *Store) GetStoreType() types.StoreType {
	return gs.parent.GetStoreType()
}
// Get implements KVStore. It charges a flat read cost, fetches the value from
// the parent, then charges a per-byte read cost for both the key and value.
func (gs *Store) Get(key []byte) (value []byte) {
	gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc)
	value = gs.parent.Get(key)

	// TODO overflow-safe math?
	gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc)
	gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc)

	return value
}
// Set implements KVStore. It validates the key/value, charges a flat write
// cost plus a per-byte write cost for both key and value, then writes to the
// parent store.
func (gs *Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)

	gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc)

	// TODO overflow-safe math?
	gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc)
	gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc)

	gs.parent.Set(key, value)
}
// Has implements KVStore. It charges a flat existence-check cost before
// delegating to the parent store.
func (gs *Store) Has(key []byte) bool {
	gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc)
	return gs.parent.Has(key)
}
// Delete implements KVStore. It charges a flat delete cost before delegating
// to the parent store.
func (gs *Store) Delete(key []byte) {
	// charge gas to prevent certain attack vectors even though space is being freed
	gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc)
	gs.parent.Delete(key)
}
// Iterator implements the KVStore interface. It returns an iterator which
// incurs a flat gas cost for seeking to the first key/value pair and a variable
// gas cost based on the current value's length if the iterator is valid.
func (gs *Store) Iterator(start, end []byte) types.Iterator {
	return gs.iterator(start, end, true)
}
// ReverseIterator implements the KVStore interface. It returns a reverse
// iterator which incurs a flat gas cost for seeking to the first key/value pair
// and a variable gas cost based on the current value's length if the iterator
// is valid.
func (gs *Store) ReverseIterator(start, end []byte) types.Iterator {
	return gs.iterator(start, end, false)
}
// CacheWrap implements KVStore. Gas-metered stores cannot be branched, so this
// always panics.
func (gs *Store) CacheWrap() types.CacheWrap {
	panic("cannot CacheWrap a GasKVStore")
}
// CacheWrapWithTrace implements the KVStore interface. Gas-metered stores
// cannot be branched, so this always panics.
func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
	panic("cannot CacheWrapWithTrace a GasKVStore")
}
// iterator builds a gas-metered iterator over the parent store, ascending or
// descending, charging the initial seek gas before handing it back.
func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator {
	var parentIt types.Iterator
	if ascending {
		parentIt = gs.parent.Iterator(start, end)
	} else {
		parentIt = gs.parent.ReverseIterator(start, end)
	}

	it := newGasIterator(gs.gasMeter, gs.gasConfig, parentIt).(*gasIterator)
	it.consumeSeekGas()

	return it
}
// gasIterator wraps a parent iterator and charges gas for each iteration step
// based on the configured costs.
type gasIterator struct {
	gasMeter  types.GasMeter
	gasConfig types.GasConfig
	parent    types.Iterator
}
// newGasIterator returns a gas-metered wrapper around parent. It does not
// charge the initial seek gas itself; callers invoke consumeSeekGas for that.
func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator {
	return &gasIterator{
		gasMeter:  gasMeter,
		gasConfig: gasConfig,
		parent:    parent,
	}
}
// Domain implements Iterator; it returns the parent iterator's key domain.
func (gi *gasIterator) Domain() (start, end []byte) {
	return gi.parent.Domain()
}
// Valid implements Iterator; it reports whether the parent iterator is still
// positioned on an entry.
func (gi *gasIterator) Valid() bool {
	return gi.parent.Valid()
}
// Next implements the Iterator interface. It seeks to the next key/value pair
// in the iterator. It incurs a flat gas cost for seeking and a variable gas
// cost based on the current value's length if the iterator is valid.
func (gi *gasIterator) Next() {
	// Charge gas for the current entry before advancing the parent.
	gi.consumeSeekGas()
	gi.parent.Next()
}
// Key implements the Iterator interface. It returns the current key and does
// not incur any gas cost.
func (gi *gasIterator) Key() (key []byte) {
	return gi.parent.Key()
}
// Value implements the Iterator interface. It returns the current value and
// does not incur any gas cost.
func (gi *gasIterator) Value() (value []byte) {
	return gi.parent.Value()
}
// Close implements Iterator; it releases the parent iterator's resources.
func (gi *gasIterator) Close() error {
	return gi.parent.Close()
}
// Error delegates the Error call to the parent iterator.
func (gi *gasIterator) Error() error {
	return gi.parent.Error()
}
// consumeSeekGas consumes on each iteration step a flat gas cost and, while
// the iterator is valid, a variable gas cost based on the current key's and
// value's lengths.
func (gi *gasIterator) consumeSeekGas() {
	if gi.Valid() {
		key := gi.Key()
		value := gi.Value()

		gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc)
		gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc)
	}

	// Flat cost is charged even when the iterator is exhausted.
	gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc)
}

View File

@ -1,120 +0,0 @@
package gaskv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/gaskv"
"cosmossdk.io/store/types"
)
func bz(s string) []byte { return []byte(s) }
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
// TestGasKVStoreBasic checks basic Set/Get/Delete through the gas-metered
// store and pins the total gas consumed under the default KV gas config.
func TestGasKVStoreBasic(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	meter := types.NewGasMeter(10000)
	st := gaskv.NewStore(mem, meter, types.KVGasConfig())

	// The gas store reports the parent's type and refuses cache-wrapping.
	require.Equal(t, types.StoreTypeDB, st.GetStoreType())
	require.Panics(t, func() { st.CacheWrap() })
	require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) })

	require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
	require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")

	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
	st.Set(keyFmt(1), valFmt(1))
	require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
	st.Delete(keyFmt(1))
	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")

	// Regression pin: total gas consumed by the sequence above.
	require.Equal(t, meter.GasConsumed(), types.Gas(6858))
}
// TestGasKVStoreIterator walks a three-entry store forward and backward
// through the gas-metered iterators, pinning the cumulative gas consumed at
// several checkpoints under the default KV gas config.
func TestGasKVStoreIterator(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	meter := types.NewGasMeter(100000)
	st := gaskv.NewStore(mem, meter, types.KVGasConfig())
	require.False(t, st.Has(keyFmt(1)))
	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
	require.Empty(t, st.Get(keyFmt(2)), "Expected `key2` to be empty")
	require.Empty(t, st.Get(keyFmt(3)), "Expected `key3` to be empty")

	st.Set(keyFmt(1), valFmt(1))
	require.True(t, st.Has(keyFmt(1)))
	st.Set(keyFmt(2), valFmt(2))
	require.True(t, st.Has(keyFmt(2)))
	st.Set(keyFmt(3), valFmt(0))

	// Forward iteration over the full domain.
	iterator := st.Iterator(nil, nil)
	start, end := iterator.Domain()
	require.Nil(t, start)
	require.Nil(t, end)
	require.NoError(t, iterator.Error())

	t.Cleanup(func() {
		if err := iterator.Close(); err != nil {
			t.Fatal(err)
		}
	})

	ka := iterator.Key()
	require.Equal(t, ka, keyFmt(1))
	va := iterator.Value()
	require.Equal(t, va, valFmt(1))
	iterator.Next()
	kb := iterator.Key()
	require.Equal(t, kb, keyFmt(2))
	vb := iterator.Value()
	require.Equal(t, vb, valFmt(2))
	iterator.Next()

	// Gas checkpoint after two forward steps.
	require.Equal(t, types.Gas(14565), meter.GasConsumed())

	kc := iterator.Key()
	require.Equal(t, kc, keyFmt(3))
	vc := iterator.Value()
	require.Equal(t, vc, valFmt(0))
	iterator.Next()
	require.Equal(t, types.Gas(14667), meter.GasConsumed())
	require.False(t, iterator.Valid())

	// Advancing an exhausted iterator panics, yet still charges gas (see the
	// 14667 -> 14697 delta).
	require.Panics(t, iterator.Next)
	require.Equal(t, types.Gas(14697), meter.GasConsumed())
	require.NoError(t, iterator.Error())

	// Reverse iteration visits the keys in descending order.
	reverseIterator := st.ReverseIterator(nil, nil)
	t.Cleanup(func() {
		if err := reverseIterator.Close(); err != nil {
			t.Fatal(err)
		}
	})

	require.Equal(t, reverseIterator.Key(), keyFmt(3))
	reverseIterator.Next()
	require.Equal(t, reverseIterator.Key(), keyFmt(2))
	reverseIterator.Next()
	require.Equal(t, reverseIterator.Key(), keyFmt(1))
	reverseIterator.Next()
	require.False(t, reverseIterator.Valid())
	require.Panics(t, reverseIterator.Next)
	require.Equal(t, types.Gas(15135), meter.GasConsumed())
}
// TestGasKVStoreOutOfGasSet verifies that Set panics when the meter has no
// gas available.
func TestGasKVStoreOutOfGasSet(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	meter := types.NewGasMeter(0) // zero-limit meter: any charge exceeds it
	st := gaskv.NewStore(mem, meter, types.KVGasConfig())
	require.Panics(t, func() { st.Set(keyFmt(1), valFmt(1)) }, "Expected out-of-gas")
}
// TestGasKVStoreOutOfGasIterator verifies that iteration panics once the
// meter's gas limit is exhausted partway through the walk.
func TestGasKVStoreOutOfGasIterator(t *testing.T) {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	meter := types.NewGasMeter(20000)
	st := gaskv.NewStore(mem, meter, types.KVGasConfig())
	st.Set(keyFmt(1), valFmt(1))
	iterator := st.Iterator(nil, nil)
	iterator.Next()
	require.Panics(t, func() { iterator.Value() }, "Expected out-of-gas")
}

View File

@ -1,79 +1,83 @@
module cosmossdk.io/store
module cosmossdk.io/store/v2
go 1.20
go 1.21
require (
cosmossdk.io/errors v1.0.0
cosmossdk.io/log v1.2.1
cosmossdk.io/math v1.1.3-rc.1
github.com/cockroachdb/errors v1.11.1
github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c
github.com/cometbft/cometbft v0.38.0
github.com/cosmos/cosmos-db v1.0.0
github.com/cosmos/gogoproto v1.4.11
github.com/cosmos/iavl v1.0.0-rc.1
github.com/cosmos/ics23/go v0.10.0
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.3 // indirect
github.com/hashicorp/go-hclog v1.5.0
github.com/hashicorp/go-plugin v1.5.2
github.com/hashicorp/golang-lru v1.0.2
github.com/spf13/cast v1.5.1 // indirect
github.com/linxGnu/grocksdb v1.8.4
github.com/stretchr/testify v1.8.4
github.com/tidwall/btree v1.7.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.31.0
gotest.tools/v3 v3.5.1
modernc.org/sqlite v1.25.0
)
require github.com/hashicorp/go-metrics v0.5.1
require (
github.com/DataDog/zstd v1.5.5 // indirect
github.com/DataDog/zstd v1.4.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emicklei/dot v1.4.2 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/getsentry/sentry-go v0.23.0 // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
github.com/jhump/protoreflect v1.15.3 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/linxGnu/grocksdb v1.7.16 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/rs/zerolog v1.31.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.9.0 // indirect
github.com/rs/zerolog v1.30.0 // indirect
github.com/sasha-s/go-deadlock v0.3.1 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.5.1 // indirect
lukechampine.com/uint128 v1.2.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.24.1 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.6.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.0.1 // indirect
)

View File

@ -4,39 +4,30 @@ cosmossdk.io/log v1.2.1 h1:Xc1GgTCicniwmMiKwDxUjO4eLhPxoVdI9vtMW8Ti/uk=
cosmossdk.io/log v1.2.1/go.mod h1:GNSCc/6+DhFIj1aLn/j7Id7PaO8DzNylUZoOYBL9+I4=
cosmossdk.io/math v1.1.3-rc.1 h1:NebCNWDqb1MJRNfvxr4YY7d8FSYgkuB3L75K6xvM+Zo=
cosmossdk.io/math v1.1.3-rc.1/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b h1:LCs8gDhg6vt8A3dN7AEJxmCoETZ4qkySoVJVm3rcSJk=
github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b/go.mod h1:TkdVsGYRqtULUppt2RbC+YaKtTHnHoWa2apfFrSKABw=
github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c h1:aDetJlMe4qJxWAwu+/bzTs2/b1EW9ecVyawpRD7N/tE=
github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c/go.mod h1:EDjiaAXc0FXiRmxDzcu1wIEJ093ohHMUWxrI6iku0XA=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/cometbft/cometbft v0.38.0 h1:ogKnpiPX7gxCvqTEF4ly25/wAxUqf181t30P3vqdpdc=
github.com/cometbft/cometbft v0.38.0/go.mod h1:5Jz0Z8YsHSf0ZaAqGvi/ifioSdVFPtEGrm8Y9T/993k=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@ -49,40 +40,33 @@ github.com/cosmos/iavl v1.0.0-rc.1/go.mod h1:CmTGqMnRnucjxbjduneZXT+0vPgNElYvdef
github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM=
github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/dot v1.4.2 h1:UbK6gX4yvrpHKlxuUQicwoAis4zl8Dzwit9SnbBAXWw=
github.com/emicklei/dot v1.4.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE=
github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@ -90,7 +74,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
@ -101,235 +84,145 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-metrics v0.5.1 h1:rfPwUqFU6uZXNvGl4hzjY8LEBsqFVU4si1H9/Hqck/U=
github.com/hashicorp/go-metrics v0.5.1/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE=
github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y=
github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso=
github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls=
github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8=
github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo=
github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I=
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d h1:htwtWgtQo8YS6JFWWi2DNgY0RwSGJ1ruMoxY6CUUclk=
github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c=
github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@ -342,22 +235,43 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM=
modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak=
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o=
modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.25.0 h1:AFweiwPNd/b3BoKnBOfFm+Y260guGMF+0UFk0savqeA=
modernc.org/sqlite v1.25.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU=
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY=
modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c=
modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg=
modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY=
modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE=

View File

@ -1,416 +0,0 @@
package iavl
import (
"errors"
"fmt"
"io"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
ics23 "github.com/cosmos/ics23/go"
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/metrics"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
const (
	// DefaultIAVLCacheSize is the default number of IAVL nodes kept in the
	// in-memory node cache when no explicit size is configured.
	DefaultIAVLCacheSize = 500000
)
// Compile-time assertions that *Store satisfies the store interfaces it is
// expected to implement.
var (
	_ types.KVStore                 = (*Store)(nil)
	_ types.CommitStore             = (*Store)(nil)
	_ types.CommitKVStore           = (*Store)(nil)
	_ types.Queryable               = (*Store)(nil)
	_ types.StoreWithInitialVersion = (*Store)(nil)
)
// Store Implements types.KVStore and CommitKVStore.
type Store struct {
	tree    Tree                 // underlying IAVL tree (mutable, or immutable wrapper from GetImmutable)
	logger  log.Logger           // may be nil; methods guard before logging
	metrics metrics.StoreMetrics // telemetry sink for store operations
}
// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the
// store's version (id) from the provided DB. An error is returned if the version
// fails to load, or if called with a positive version on an empty tree.
//
// It is a convenience wrapper around LoadStoreWithInitialVersion with an
// initial version of 0 (i.e. no explicit initial version).
func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
	return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics)
}
// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion
// to the one given. Internally, it will load the store's version (id) from the
// provided DB. An error is returned if the version fails to load, or if called with a positive
// version on an empty tree.
func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
	mutableTree := iavl.NewMutableTree(db, cacheSize, disableFastNode, logger, iavl.InitialVersionOption(initialVersion))
	// A legacy on-disk layout may require a one-time (potentially slow) upgrade;
	// surface that to the operator before loading.
	upgradeable, err := mutableTree.IsUpgradeable()
	if err != nil {
		return nil, err
	}
	if logger != nil && upgradeable {
		logger.Info(
			"Upgrading IAVL storage for faster queries + execution on live state. This may take a while",
			"store_key", key.String(),
			"version", initialVersion,
			"commit", fmt.Sprintf("%X", id),
		)
	}
	if _, err = mutableTree.LoadVersion(id.Version); err != nil {
		return nil, err
	}
	if logger != nil {
		logger.Debug("Finished loading IAVL tree")
	}
	return &Store{
		tree:    mutableTree,
		logger:  logger,
		metrics: metrics,
	}, nil
}
// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable
// IAVL tree reference. It should only be used for testing purposes.
//
// CONTRACT: The IAVL tree should be fully loaded.
// CONTRACT: PruningOptions passed in as argument must be the same as pruning options
// passed into iavl.MutableTree
func UnsafeNewStore(tree *iavl.MutableTree) *Store {
	// Note: logger is deliberately left nil; Store methods guard before logging.
	return &Store{
		tree:    tree,
		metrics: metrics.NewNoOpMetrics(),
	}
}
// GetImmutable returns a reference to a new store backed by an immutable IAVL
// tree at a specific version (height) without any pruning options. This should
// be used for querying and iteration only. If the version does not exist or has
// been pruned, an empty immutable IAVL tree will be used.
// Any mutable operations executed will result in a panic.
func (st *Store) GetImmutable(version int64) (*Store, error) {
	if !st.VersionExists(version) {
		return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height")
	}
	immutable, err := st.tree.GetImmutable(version)
	if err != nil {
		return nil, err
	}
	// Wrap the immutable tree so mutating Tree methods panic instead of
	// silently corrupting state.
	return &Store{
		tree:    &immutableTree{immutable},
		metrics: st.metrics,
	}, nil
}
// Commit commits the current store state and returns a CommitID with the new
// version and hash.
func (st *Store) Commit() types.CommitID {
	defer st.metrics.MeasureSince("store", "iavl", "commit")
	// SaveVersion persists the working tree as a new version; a failure here is
	// unrecoverable for the store, hence the panic.
	h, v, err := st.tree.SaveVersion()
	if err != nil {
		panic(err)
	}
	return types.CommitID{Version: v, Hash: h}
}
// WorkingHash returns the hash of the current working tree.
func (st *Store) WorkingHash() []byte {
	return st.tree.WorkingHash()
}
// LastCommitID implements Committer. It returns the version and hash of the
// most recently saved tree version.
func (st *Store) LastCommitID() types.CommitID {
	return types.CommitID{
		Version: st.tree.Version(),
		Hash:    st.tree.Hash(),
	}
}
// SetPruning panics as pruning options should be provided at initialization
// since IAVL accepts pruning options directly.
func (st *Store) SetPruning(_ pruningtypes.PruningOptions) {
	panic("cannot set pruning options on an initialized IAVL store")
}
// GetPruning panics as pruning options should be provided at initialization
// since IAVL accepts pruning options directly.
func (st *Store) GetPruning() pruningtypes.PruningOptions {
	panic("cannot get pruning options on an initialized IAVL store")
}
// VersionExists returns whether or not a given version is stored.
func (st *Store) VersionExists(version int64) bool {
	return st.tree.VersionExists(version)
}
// GetAllVersions returns all versions in the iavl tree
func (st *Store) GetAllVersions() []int {
	return st.tree.AvailableVersions()
}
// GetStoreType implements Store; an IAVL-backed store is always StoreTypeIAVL.
func (st *Store) GetStoreType() types.StoreType {
	return types.StoreTypeIAVL
}
// CacheWrap implements Store; it returns a cache-wrapped (branched) view of
// this store whose writes are buffered until Write is called on the cache.
func (st *Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(st)
}
// CacheWrapWithTrace implements the Store interface. Operations on the
// returned cache are additionally traced to w with context tc.
func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(st, w, tc))
}
// Set implements types.KVStore. It panics on nil/empty keys and nil values.
// NOTE: unlike Get/Has/Delete, a tree-level error here is logged rather than
// panicked on.
func (st *Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)
	_, err := st.tree.Set(key, value)
	if err != nil && st.logger != nil {
		st.logger.Error("iavl set error", "error", err.Error())
	}
}
// Get implements types.KVStore. It returns nil if the key is absent.
func (st *Store) Get(key []byte) []byte {
	defer st.metrics.MeasureSince("store", "iavl", "get")
	value, err := st.tree.Get(key)
	if err != nil {
		panic(err)
	}
	return value
}
// Has implements types.KVStore.
func (st *Store) Has(key []byte) (exists bool) {
	defer st.metrics.MeasureSince("store", "iavl", "has")
	has, err := st.tree.Has(key)
	if err != nil {
		panic(err)
	}
	return has
}
// Delete implements types.KVStore. Deleting an absent key is a no-op.
func (st *Store) Delete(key []byte) {
	defer st.metrics.MeasureSince("store", "iavl", "delete")
	_, _, err := st.tree.Remove(key)
	if err != nil {
		panic(err)
	}
}
// DeleteVersionsTo deletes versions upto the given version from the MutableTree. An error
// is returned if any single version is invalid or the delete fails. All writes
// happen in a single batch with a single commit.
func (st *Store) DeleteVersionsTo(version int64) error {
	return st.tree.DeleteVersionsTo(version)
}
// LoadVersionForOverwriting attempts to load a tree at a previously committed
// version. Any versions greater than targetVersion will be deleted.
func (st *Store) LoadVersionForOverwriting(targetVersion int64) error {
	return st.tree.LoadVersionForOverwriting(targetVersion)
}
// Iterator implements types.KVStore. It returns an ascending iterator over the
// [start, end) key domain, panicking if the underlying tree fails to create one.
func (st *Store) Iterator(start, end []byte) types.Iterator {
	return st.iterator(start, end, true)
}
// ReverseIterator implements types.KVStore. It returns a descending iterator
// over the [start, end) key domain, panicking if the underlying tree fails to
// create one.
func (st *Store) ReverseIterator(start, end []byte) types.Iterator {
	return st.iterator(start, end, false)
}
// iterator is the shared implementation behind Iterator and ReverseIterator;
// the two previously duplicated this body verbatim except for the direction flag.
func (st *Store) iterator(start, end []byte, ascending bool) types.Iterator {
	iterator, err := st.tree.Iterator(start, end, ascending)
	if err != nil {
		panic(err)
	}
	return iterator
}
// SetInitialVersion sets the initial version of the IAVL tree. It is used when
// starting a new chain at an arbitrary height.
// NOTE: the int64 version is converted to uint64 for the tree; callers are
// expected to pass a non-negative version.
func (st *Store) SetInitialVersion(version int64) {
	st.tree.SetInitialVersion(uint64(version))
}
// Export exports the IAVL store at the given version, returning an iavl.Exporter for the tree.
func (st *Store) Export(version int64) (*iavl.Exporter, error) {
	istore, err := st.GetImmutable(version)
	if err != nil {
		return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version)
	}
	// GetImmutable always wraps the result in *immutableTree; this assertion is
	// a defensive sanity check.
	tree, ok := istore.tree.(*immutableTree)
	if !ok || tree == nil {
		return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version)
	}
	return tree.Export()
}
// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing.
// It fails if the store is backed by an immutable tree (e.g. one obtained via GetImmutable).
func (st *Store) Import(version int64) (*iavl.Importer, error) {
	tree, ok := st.tree.(*iavl.MutableTree)
	if !ok {
		return nil, errors.New("iavl import failed: unable to find mutable tree")
	}
	return tree.Import(version)
}
// getHeight resolves the effective query height: a requested height of 0 means
// "latest", in which case the previous version is preferred (so Merkle proofs
// are immediately available) and the latest version is used only when the
// previous one does not exist.
func getHeight(tree Tree, req *types.RequestQuery) int64 {
	if req.Height != 0 {
		return req.Height
	}
	latest := tree.Version()
	if tree.VersionExists(latest - 1) {
		return latest - 1
	}
	return latest
}
// Query implements ABCI interface, allows queries
//
// by default we will return from (latest height -1),
// as we will have merkle proofs immediately (header height = data height + 1)
// If latest-1 is not present, use latest (which must be present)
// if you care to have the latest data to see a tx results, you must
// explicitly set the height you want to see
func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) {
	defer st.metrics.MeasureSince("store", "iavl", "query")
	if len(req.Data) == 0 {
		return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length")
	}
	tree := st.tree
	// store the height we chose in the response, with 0 being changed to the
	// latest height
	res = &types.ResponseQuery{
		Height: getHeight(tree, req),
	}
	switch req.Path {
	case "/key": // get by key
		key := req.Data // data holds the key bytes
		res.Key = key
		if !st.VersionExists(res.Height) {
			// Report missing version in the log field rather than an error.
			res.Log = iavl.ErrVersionDoesNotExist.Error()
			break
		}
		value, err := tree.GetVersioned(key, res.Height)
		if err != nil {
			panic(err)
		}
		res.Value = value
		if !req.Prove {
			break
		}
		// Continue to prove existence/absence of value
		// Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof
		iTree, err := tree.GetImmutable(res.Height)
		if err != nil {
			// sanity check: If value for given version was retrieved, immutable tree must also be retrievable
			panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error()))
		}
		mtree := &iavl.MutableTree{
			ImmutableTree: iTree,
		}
		// get proof from tree and convert to merkle.Proof before adding to result
		res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil)
	case "/subspace":
		// Range scan: return all key/value pairs under the requested prefix,
		// marshaled as a kv.Pairs protobuf.
		pairs := kv.Pairs{
			Pairs: make([]kv.Pair, 0),
		}
		subspace := req.Data
		res.Key = subspace
		iterator := types.KVStorePrefixIterator(st, subspace)
		for ; iterator.Valid(); iterator.Next() {
			pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
		}
		if err := iterator.Close(); err != nil {
			panic(fmt.Errorf("failed to close iterator: %w", err))
		}
		bz, err := pairs.Marshal()
		if err != nil {
			panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
		}
		res.Value = bz
	default:
		return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path)
	}
	return res, err
}
// TraverseStateChanges traverses the state changes between two versions and calls the given function.
func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error {
	return st.tree.TraverseStateChanges(startVersion, endVersion, fn)
}
// getProofFromTree takes a MutableTree, a key, and a flag for creating existence or absence proof
// and returns the appropriate merkle.Proof. Since this must be called after querying for the value,
// this function should never error; thus, it will panic on error rather than returning it.
func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps {
	var (
		commitmentProof *ics23.CommitmentProof
		err             error
	)
	if !exists {
		// value wasn't found; a non-membership proof must be creatable
		if commitmentProof, err = tree.GetNonMembershipProof(key); err != nil {
			panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error()))
		}
	} else {
		// value was found; a membership proof must be creatable
		if commitmentProof, err = tree.GetMembershipProof(key); err != nil {
			panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error()))
		}
	}
	op := types.NewIavlCommitmentOp(key, commitmentProof)
	return &cmtprotocrypto.ProofOps{Ops: []cmtprotocrypto.ProofOp{op.ProofOp()}}
}

View File

@ -1,713 +0,0 @@
package iavl
import (
"bytes"
crand "crypto/rand"
"fmt"
"math"
"sort"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/types"
)
var (
	// cacheSize is the node-cache size used by all test trees.
	cacheSize = 100
	// treeData is the fixed key/value fixture seeded into every aloha tree.
	treeData  = map[string]string{
		"hello": "goodbye",
		"aloha": "shalom",
	}
	// nMoreData controls how many extra random pairs newAlohaTree inserts (0 = none).
	nMoreData = 0
)
// randBytes returns numBytes of cryptographically random bytes.
func randBytes(numBytes int) []byte {
	b := make([]byte, numBytes)
	// crand.Read never fails on supported platforms; error deliberately ignored.
	_, _ = crand.Read(b)
	return b
}
// make a tree with data from above and save it
func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) {
	t.Helper()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	for k, v := range treeData {
		_, err := tree.Set([]byte(k), []byte(v))
		require.NoError(t, err)
	}
	for i := 0; i < nMoreData; i++ {
		key := randBytes(12)
		value := randBytes(50)
		_, err := tree.Set(key, value)
		require.NoError(t, err)
	}
	hash, ver, err := tree.SaveVersion()
	require.Nil(t, err)
	return tree, types.CommitID{Version: ver, Hash: hash}
}
// TestLoadStore commits three successive versions of the same key and checks
// that both GetImmutable on an existing store and LoadStore on a fresh store
// observe the value recorded at each height.
func TestLoadStore(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	store := UnsafeNewStore(tree)
	// Create non-pruned height H
	updated, err := tree.Set([]byte("hello"), []byte("hallo"))
	require.NoError(t, err)
	require.True(t, updated)
	hash, verH, err := tree.SaveVersion()
	require.NoError(t, err) // check the error before trusting hash/verH
	cIDH := types.CommitID{Version: verH, Hash: hash}
	// Create pruned height Hp
	updated, err = tree.Set([]byte("hello"), []byte("hola"))
	require.NoError(t, err)
	require.True(t, updated)
	hash, verHp, err := tree.SaveVersion()
	require.NoError(t, err)
	cIDHp := types.CommitID{Version: verHp, Hash: hash}
	// TODO: Prune this height
	// Create current height Hc
	updated, err = tree.Set([]byte("hello"), []byte("ciao"))
	require.NoError(t, err)
	require.True(t, updated)
	hash, verHc, err := tree.SaveVersion()
	require.NoError(t, err)
	cIDHc := types.CommitID{Version: verHc, Hash: hash}
	// Querying an existing store at some previous non-pruned height H
	hStore, err := store.GetImmutable(verH)
	require.NoError(t, err)
	require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo")
	// Querying an existing store at some previous pruned height Hp
	hpStore, err := store.GetImmutable(verHp)
	require.NoError(t, err)
	require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola")
	// Querying an existing store at current height Hc
	hcStore, err := store.GetImmutable(verHc)
	require.NoError(t, err)
	require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao")
	// Querying a new store at some previous non-pruned height H
	newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	require.NoError(t, err)
	require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo")
	// Querying a new store at some previous pruned height Hp
	newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	require.NoError(t, err)
	require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola")
	// Querying a new store at current height H
	newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	require.NoError(t, err)
	require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao")
}
// TestGetImmutable verifies that GetImmutable snapshots a specific version,
// rejects future versions, supports proof queries, and panics on mutation.
func TestGetImmutable(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	store := UnsafeNewStore(tree)
	updated, err := tree.Set([]byte("hello"), []byte("adios"))
	require.NoError(t, err)
	require.True(t, updated)
	hash, ver, err := tree.SaveVersion()
	cID := types.CommitID{Version: ver, Hash: hash}
	require.Nil(t, err)
	// A version past the latest one must be rejected.
	_, err = store.GetImmutable(cID.Version + 1)
	require.Error(t, err)
	newStore, err := store.GetImmutable(cID.Version - 1)
	require.NoError(t, err)
	require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye"))
	newStore, err = store.GetImmutable(cID.Version)
	require.NoError(t, err)
	require.Equal(t, newStore.Get([]byte("hello")), []byte("adios"))
	res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true})
	require.NoError(t, err)
	require.Equal(t, res.Value, []byte("adios"))
	require.NotNil(t, res.ProofOps)
	// Mutating operations on an immutable store must panic.
	require.Panics(t, func() { newStore.Set(nil, nil) })
	require.Panics(t, func() { newStore.Delete(nil) })
	require.Panics(t, func() { newStore.Commit() })
}
// TestTestGetImmutableIterator checks iteration over an immutable snapshot.
// NOTE(review): the doubled "TestTest" prefix looks like a typo, but renaming
// would change the test's identity, so it is kept.
func TestTestGetImmutableIterator(t *testing.T) {
	db := dbm.NewMemDB()
	tree, cID := newAlohaTree(t, db)
	store := UnsafeNewStore(tree)
	newStore, err := store.GetImmutable(cID.Version)
	require.NoError(t, err)
	iter := newStore.Iterator([]byte("aloha"), []byte("hellz"))
	expected := []string{"aloha", "hello"}
	var i int
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
}
// TestIAVLStoreGetSetHasDelete exercises the four basic KVStore operations.
func TestIAVLStoreGetSetHasDelete(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	iavlStore := UnsafeNewStore(tree)
	key := "hello"
	exists := iavlStore.Has([]byte(key))
	require.True(t, exists)
	value := iavlStore.Get([]byte(key))
	require.EqualValues(t, value, treeData[key])
	value2 := "notgoodbye"
	iavlStore.Set([]byte(key), []byte(value2))
	value = iavlStore.Get([]byte(key))
	require.EqualValues(t, value, value2)
	iavlStore.Delete([]byte(key))
	exists = iavlStore.Has([]byte(key))
	require.False(t, exists)
}
// TestIAVLStoreNoNilSet verifies that Set rejects nil/empty keys and nil values.
func TestIAVLStoreNoNilSet(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	iavlStore := UnsafeNewStore(tree)
	require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic")
	require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
	require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic")
}
// TestIAVLIterator iterates the aloha fixture over several [start, end) ranges
// (including nil bounds) and checks the keys/values seen, in order.
func TestIAVLIterator(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	iavlStore := UnsafeNewStore(tree)
	iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz"))
	expected := []string{"aloha", "hello"}
	var i int
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
	iter = iavlStore.Iterator([]byte("golang"), []byte("rocks"))
	expected = []string{"hello"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
	iter = iavlStore.Iterator(nil, []byte("golang"))
	expected = []string{"aloha"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
	iter = iavlStore.Iterator(nil, []byte("shalom"))
	expected = []string{"aloha", "hello"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
	iter = iavlStore.Iterator(nil, nil)
	expected = []string{"aloha", "hello"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
	iter = iavlStore.Iterator([]byte("golang"), nil)
	expected = []string{"hello"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, treeData[expectedKey])
		i++
	}
	require.Equal(t, len(expected), i)
}
// TestIAVLReverseIterator checks descending iteration over several ranges.
func TestIAVLReverseIterator(t *testing.T) {
	db := dbm.NewMemDB()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := UnsafeNewStore(tree)
	iavlStore.Set([]byte{0x00}, []byte("0"))
	iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0"))
	iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1"))
	iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2"))
	iavlStore.Set([]byte{0x01}, []byte("1"))
	// testReverseIterator asserts the values produced by a reverse iteration
	// over [start, end), in order.
	testReverseIterator := func(t *testing.T, start, end []byte, expected []string) {
		t.Helper()
		iter := iavlStore.ReverseIterator(start, end)
		var i int
		for i = 0; iter.Valid(); iter.Next() {
			expectedValue := expected[i]
			value := iter.Value()
			require.EqualValues(t, string(value), expectedValue)
			i++
		}
		require.Equal(t, len(expected), i)
	}
	testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"})
	testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"})
	testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"})
	testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"})
	testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"})
	testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"})
}
// TestIAVLPrefixIterator exercises KVStorePrefixIterator over string keys and
// over binary keys whose prefix ends in 0xFF bytes (the prefix-end edge case).
func TestIAVLPrefixIterator(t *testing.T) {
	db := dbm.NewMemDB()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := UnsafeNewStore(tree)
	iavlStore.Set([]byte("test1"), []byte("test1"))
	iavlStore.Set([]byte("test2"), []byte("test2"))
	iavlStore.Set([]byte("test3"), []byte("test3"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4"))
	var i int
	iter := types.KVStorePrefixIterator(iavlStore, []byte("test"))
	expected := []string{"test1", "test2", "test3"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, expectedKey)
		i++
	}
	iter.Close()
	require.Equal(t, len(expected), i)
	iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)})
	expected2 := [][]byte{
		{byte(55), byte(255), byte(255), byte(0)},
		{byte(55), byte(255), byte(255), byte(1)},
		{byte(55), byte(255), byte(255), byte(255)},
	}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected2[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, []byte("test4"))
		i++
	}
	iter.Close()
	// BUG FIX: compare against the slice actually iterated (expected2), not
	// expected; the previous assertion only passed because both had length 3.
	require.Equal(t, len(expected2), i)
	iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)})
	expected2 = [][]byte{
		{byte(255), byte(255), byte(0)},
		{byte(255), byte(255), byte(1)},
		{byte(255), byte(255), byte(255)},
	}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected2[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, []byte("test4"))
		i++
	}
	iter.Close()
	require.Equal(t, len(expected2), i)
}
// TestIAVLReversePrefixIterator mirrors TestIAVLPrefixIterator but iterates
// each prefix in descending key order.
func TestIAVLReversePrefixIterator(t *testing.T) {
	db := dbm.NewMemDB()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := UnsafeNewStore(tree)
	iavlStore.Set([]byte("test1"), []byte("test1"))
	iavlStore.Set([]byte("test2"), []byte("test2"))
	iavlStore.Set([]byte("test3"), []byte("test3"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4"))
	iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4"))
	iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4"))
	var i int
	iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test"))
	expected := []string{"test3", "test2", "test1"}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, expectedKey)
		i++
	}
	iter.Close() // release the iterator, consistent with TestIAVLPrefixIterator
	require.Equal(t, len(expected), i)
	iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)})
	expected2 := [][]byte{
		{byte(55), byte(255), byte(255), byte(255)},
		{byte(55), byte(255), byte(255), byte(1)},
		{byte(55), byte(255), byte(255), byte(0)},
	}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected2[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, []byte("test4"))
		i++
	}
	iter.Close()
	// BUG FIX: compare against the slice actually iterated (expected2), not
	// expected; the previous assertion only passed because both had length 3.
	require.Equal(t, len(expected2), i)
	iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)})
	expected2 = [][]byte{
		{byte(255), byte(255), byte(255)},
		{byte(255), byte(255), byte(1)},
		{byte(255), byte(255), byte(0)},
	}
	for i = 0; iter.Valid(); iter.Next() {
		expectedKey := expected2[i]
		key, value := iter.Key(), iter.Value()
		require.EqualValues(t, key, expectedKey)
		require.EqualValues(t, value, []byte("test4"))
		i++
	}
	iter.Close()
	require.Equal(t, len(expected2), i)
}
// nextVersion writes a version-stamped key/value pair and commits, advancing
// the store by exactly one version.
func nextVersion(iavl *Store) {
	key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version))
	value := []byte(fmt.Sprintf("Value for tree: %d", iavl.LastCommitID().Version))
	iavl.Set(key, value)
	iavl.Commit()
}
// TestIAVLNoPrune commits 100 versions and verifies every earlier version is
// still present (no pruning is configured on the tree).
func TestIAVLNoPrune(t *testing.T) {
	db := dbm.NewMemDB()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := UnsafeNewStore(tree)
	nextVersion(iavlStore)
	for i := 1; i < 100; i++ {
		for j := 1; j <= i; j++ {
			require.True(t, iavlStore.VersionExists(int64(j)),
				"Missing version %d with latest version %d. Should be storing all versions",
				j, i)
		}
		nextVersion(iavlStore)
	}
}
// TestIAVLStoreQuery drives the ABCI Query path ("/key" and "/subspace")
// across several commits, checking version pinning and the default
// latest-minus-one height behavior.
func TestIAVLStoreQuery(t *testing.T) {
	db := dbm.NewMemDB()
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := UnsafeNewStore(tree)
	k1, v1 := []byte("key1"), []byte("val1")
	k2, v2 := []byte("key2"), []byte("val2")
	v3 := []byte("val3")
	ksub := []byte("key")
	KVs0 := kv.Pairs{}
	KVs1 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v1},
			{Key: k2, Value: v2},
		},
	}
	KVs2 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v3},
			{Key: k2, Value: v2},
		},
	}
	valExpSubEmpty, err := KVs0.Marshal()
	require.NoError(t, err)
	valExpSub1, err := KVs1.Marshal()
	require.NoError(t, err)
	valExpSub2, err := KVs2.Marshal()
	require.NoError(t, err)
	cid := iavlStore.Commit()
	ver := cid.Version
	query := types.RequestQuery{Path: "/key", Data: k1, Height: ver}
	querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver}
	// query subspace before anything set
	qres, err := iavlStore.Query(&querySub)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, valExpSubEmpty, qres.Value)
	// set data
	iavlStore.Set(k1, v1)
	iavlStore.Set(k2, v2)
	// set data without commit, doesn't show up
	qres, err = iavlStore.Query(&query)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Nil(t, qres.Value)
	// commit it, but still don't see on old version
	cid = iavlStore.Commit()
	qres, err = iavlStore.Query(&query)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Nil(t, qres.Value)
	// but yes on the new version
	query.Height = cid.Version
	qres, err = iavlStore.Query(&query)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, v1, qres.Value)
	// and for the subspace
	qres, err = iavlStore.Query(&querySub)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, valExpSub1, qres.Value)
	// modify
	iavlStore.Set(k1, v3)
	cid = iavlStore.Commit()
	// query will return old values, as height is fixed
	qres, err = iavlStore.Query(&query)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, v1, qres.Value)
	// update to latest in the query and we are happy
	query.Height = cid.Version
	qres, err = iavlStore.Query(&query)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, v3, qres.Value)
	query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version}
	qres, err = iavlStore.Query(&query2)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, v2, qres.Value)
	// and for the subspace
	qres, err = iavlStore.Query(&querySub)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, valExpSub2, qres.Value)
	// default (height 0) will show latest -1
	query0 := types.RequestQuery{Path: "/key", Data: k1}
	qres, err = iavlStore.Query(&query0)
	require.NoError(t, err)
	require.Equal(t, uint32(0), qres.Code)
	require.Equal(t, v1, qres.Value)
}
// BenchmarkIAVLIteratorNext measures the per-step cost of iterator.Next over
// a 1000-entry tree; iterators are pre-created outside the timed region.
func BenchmarkIAVLIteratorNext(b *testing.B) {
	b.ReportAllocs()
	db := dbm.NewMemDB()
	treeSize := 1000
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	for i := 0; i < treeSize; i++ {
		key := randBytes(4)
		value := randBytes(50)
		_, err := tree.Set(key, value)
		require.NoError(b, err)
	}
	iavlStore := UnsafeNewStore(tree)
	iterators := make([]types.Iterator, b.N/treeSize)
	for i := 0; i < len(iterators); i++ {
		iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255})
	}
	b.ResetTimer()
	for i := 0; i < len(iterators); i++ {
		iter := iterators[i]
		for j := 0; j < treeSize; j++ {
			iter.Next()
		}
	}
}
// TestSetInitialVersion checks that SetInitialVersion works on a mutable tree
// and panics when the store is backed by an immutable tree.
func TestSetInitialVersion(t *testing.T) {
	testCases := []struct {
		name     string
		storeFn  func(db *dbm.MemDB) *Store
		expPanic bool
	}{
		{
			"works with a mutable tree",
			func(db *dbm.MemDB) *Store {
				tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
				store := UnsafeNewStore(tree)
				return store
			}, false,
		},
		{
			"throws error on immutable tree",
			func(db *dbm.MemDB) *Store {
				tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
				store := UnsafeNewStore(tree)
				_, version, err := store.tree.SaveVersion()
				require.NoError(t, err)
				require.Equal(t, int64(1), version)
				store, err = store.GetImmutable(1)
				require.NoError(t, err)
				return store
			}, true,
		},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the closure
		t.Run(tc.name, func(t *testing.T) {
			db := dbm.NewMemDB()
			store := tc.storeFn(db)
			if tc.expPanic {
				require.Panics(t, func() { store.SetInitialVersion(5) })
			} else {
				store.SetInitialVersion(5)
				cid := store.Commit()
				require.Equal(t, int64(5), cid.GetVersion())
			}
		})
	}
}
// TestCacheWraps asserts that CacheWrap and CacheWrapWithTrace both return
// cachekv-backed stores.
func TestCacheWraps(t *testing.T) {
	db := dbm.NewMemDB()
	tree, _ := newAlohaTree(t, db)
	store := UnsafeNewStore(tree)
	cacheWrapper := store.CacheWrap()
	require.IsType(t, &cachekv.Store{}, cacheWrapper)
	cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
	require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}
// TestChangeSets builds 10 versions, captures the change sets for versions
// 7-10, rolls back to version 6, replays the change sets, and checks the
// resulting commit ID matches the original.
func TestChangeSets(t *testing.T) {
	db := dbm.NewMemDB()
	treeSize := 1000
	treeVersion := int64(10)
	targetVersion := int64(6)
	tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt))
	for j := int64(0); j < treeVersion; j++ {
		keys := [][]byte{}
		for i := 0; i < treeSize; i++ {
			keys = append(keys, randBytes(4))
		}
		// keys are inserted in sorted order to make each version deterministic
		sort.Slice(keys, func(p, q int) bool {
			return bytes.Compare(keys[p], keys[q]) < 0
		})
		for i := 0; i < treeSize; i++ {
			key := keys[i]
			value := randBytes(50)
			_, err := tree.Set(key, value)
			require.NoError(t, err)
		}
		_, _, err := tree.SaveVersion()
		require.NoError(t, err)
	}
	changeSets := []*iavl.ChangeSet{}
	iavlStore := UnsafeNewStore(tree)
	commitID := iavlStore.LastCommitID()
	require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error {
		changeSets = append(changeSets, cs)
		return nil
	}))
	require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion))
	for i, cs := range changeSets {
		v, err := tree.SaveChangeSet(cs)
		require.NoError(t, err)
		require.Equal(t, v, targetVersion+int64(i+1))
	}
	restoreCommitID := iavlStore.LastCommitID()
	require.Equal(t, commitID, restoreCommitID)
}

View File

@ -1,99 +0,0 @@
package iavl
import (
"fmt"
"github.com/cosmos/iavl"
"cosmossdk.io/store/types"
)
// Compile-time assertions: both the immutable wrapper and iavl.MutableTree
// satisfy the Tree interface.
var (
	_ Tree = (*immutableTree)(nil)
	_ Tree = (*iavl.MutableTree)(nil)
)

type (
	// Tree defines an interface that both mutable and immutable IAVL trees
	// must implement. For mutable IAVL trees, the interface is directly
	// implemented by an iavl.MutableTree. For an immutable IAVL tree, a wrapper
	// must be made.
	Tree interface {
		Has(key []byte) (bool, error)
		Get(key []byte) ([]byte, error)
		Set(key, value []byte) (bool, error)
		Remove(key []byte) ([]byte, bool, error)
		SaveVersion() ([]byte, int64, error)
		Version() int64
		Hash() []byte
		WorkingHash() []byte
		VersionExists(version int64) bool
		DeleteVersionsTo(version int64) error
		GetVersioned(key []byte, version int64) ([]byte, error)
		GetImmutable(version int64) (*iavl.ImmutableTree, error)
		SetInitialVersion(version uint64)
		Iterator(start, end []byte, ascending bool) (types.Iterator, error)
		AvailableVersions() []int
		LoadVersionForOverwriting(targetVersion int64) error
		TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error
	}

	// immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree
	// that implements the Tree interface. It should only be used for querying
	// and iteration, specifically at previous heights.
	immutableTree struct {
		*iavl.ImmutableTree
	}
)
// Set implements Tree. It always panics: an immutable tree cannot be mutated.
func (it *immutableTree) Set(_, _ []byte) (bool, error) {
	panic("cannot call 'Set' on an immutable IAVL tree")
}

// Remove implements Tree. It always panics: an immutable tree cannot be mutated.
func (it *immutableTree) Remove(_ []byte) ([]byte, bool, error) {
	panic("cannot call 'Remove' on an immutable IAVL tree")
}

// SaveVersion implements Tree. It always panics: an immutable tree has nothing
// new to persist.
func (it *immutableTree) SaveVersion() ([]byte, int64, error) {
	panic("cannot call 'SaveVersion' on an immutable IAVL tree")
}

// DeleteVersionsTo implements Tree. It always panics: version history cannot be
// pruned through an immutable snapshot.
func (it *immutableTree) DeleteVersionsTo(_ int64) error {
	panic("cannot call 'DeleteVersionsTo' on an immutable IAVL tree")
}

// SetInitialVersion implements Tree. It always panics: the initial version can
// only be configured on a mutable tree.
func (it *immutableTree) SetInitialVersion(_ uint64) {
	panic("cannot call 'SetInitialVersion' on an immutable IAVL tree")
}
// VersionExists implements Tree. An immutable wrapper represents exactly one
// version, so only that version is reported as existing.
func (it *immutableTree) VersionExists(version int64) bool {
	return version == it.Version()
}
// GetVersioned implements Tree. Since the wrapper holds a single version,
// requesting any other version is an error; otherwise the lookup is delegated
// to the wrapped tree.
func (it *immutableTree) GetVersioned(key []byte, version int64) ([]byte, error) {
	if version != it.Version() {
		return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version())
	}

	return it.Get(key)
}
// GetImmutable implements Tree. It hands back the wrapped tree itself when the
// requested version matches, and errors for any other version.
func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error) {
	if version != it.Version() {
		return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version())
	}

	return it.ImmutableTree, nil
}
// AvailableVersions implements Tree. An immutable snapshot tracks no version
// history, so the result is always an empty (non-nil) slice.
func (it *immutableTree) AvailableVersions() []int {
	versions := make([]int, 0)
	return versions
}
// LoadVersionForOverwriting implements Tree. It always panics: an immutable
// tree cannot be rolled back.
//
// The unused parameter is named `_` for consistency with the other panic
// stubs on this type (Set, Remove, DeleteVersionsTo, SetInitialVersion).
func (it *immutableTree) LoadVersionForOverwriting(_ int64) error {
	panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree")
}
// WorkingHash implements Tree. It always panics: there is no uncommitted
// working state on an immutable tree.
func (it *immutableTree) WorkingHash() []byte {
	panic("cannot call 'WorkingHash' on an immutable IAVL tree")
}

View File

@ -1,40 +0,0 @@
package iavl
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
)
// TestImmutableTreePanics verifies that every mutating method of immutableTree
// panics, and that the version-bound getters reject any version other than the
// wrapped tree's own version (0 for a fresh tree).
func TestImmutableTreePanics(t *testing.T) {
	t.Parallel()
	immTree := iavl.NewImmutableTree(dbm.NewMemDB(), 100, false, log.NewNopLogger())
	it := &immutableTree{immTree}

	// All mutating operations must panic. The inner require.NoError lines are
	// unreachable; the panic fires first.
	require.Panics(t, func() {
		_, err := it.Set([]byte{}, []byte{})
		require.NoError(t, err)
	})
	require.Panics(t, func() {
		_, _, err := it.Remove([]byte{})
		require.NoError(t, err)
	})
	require.Panics(t, func() { _, _, _ = it.SaveVersion() })
	require.Panics(t, func() { _ = it.DeleteVersionsTo(int64(1)) })

	// A fresh immutable tree is at version 0, so version 1 must be rejected.
	val, err := it.GetVersioned(nil, 1)
	require.Error(t, err)
	require.Nil(t, val)

	imm, err := it.GetImmutable(1)
	require.Error(t, err)
	require.Nil(t, imm)

	// Version 0 matches and returns the wrapped tree itself.
	imm, err = it.GetImmutable(0)
	require.NoError(t, err)
	require.NotNil(t, imm)
	require.Equal(t, immTree, imm)
}

View File

@ -1,2 +0,0 @@
// Package conv provides internal functions for conversions and data manipulation.
package conv

View File

@ -1,19 +0,0 @@
package conv
import (
"unsafe"
)
// UnsafeStrToBytes converts a string into a byte slice without copying.
// The returned slice aliases the string's backing memory and must never be
// mutated; doing so would corrupt the (immutable) string and can cause a
// segmentation fault.
func UnsafeStrToBytes(s string) []byte {
	// ref https://github.com/golang/go/issues/53003#issuecomment-1140276077
	data := unsafe.StringData(s)
	return unsafe.Slice(data, len(s))
}
// UnsafeBytesToStr is meant to make a zero allocation conversion
// from []byte -> string to speed up operations, it is not meant
// to be used generally, but for a specific pattern to delete keys
// from a map. The input slice must not be mutated after the call, as the
// returned string aliases its backing memory.
//
// Uses unsafe.String/unsafe.SliceData (Go 1.20+, already used by
// UnsafeStrToBytes in this package) instead of casting through the slice
// header, which relied on unspecified internal layout.
func UnsafeBytesToStr(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

View File

@ -1,54 +0,0 @@
package conv
import (
"runtime"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/suite"
)
func TestStringSuite(t *testing.T) {
suite.Run(t, new(StringSuite))
}
// StringSuite groups the unsafe string/byte conversion tests.
type StringSuite struct{ suite.Suite }
// unsafeConvertStr performs the conversion in a separate function so the
// string literal can become garbage-collectable once this frame returns.
func unsafeConvertStr() []byte {
	const probe = "abc"
	return UnsafeStrToBytes(probe)
}
// TestUnsafeStrToBytes checks that the zero-copy string->[]byte conversion
// survives a garbage-collection cycle: the backing array must remain readable,
// and appending must not clobber the original contents.
func (s *StringSuite) TestUnsafeStrToBytes() {
	// we convert in other function to trigger GC. We want to check that
	// the underlying array in []bytes is accessible after GC will finish swapping.
	for i := 0; i < 5; i++ {
		b := unsafeConvertStr()
		runtime.GC()
		// Give the GC a moment to complete before touching the bytes.
		<-time.NewTimer(2 * time.Millisecond).C
		// append must reallocate (cap == len), leaving the aliased bytes intact.
		b2 := append(b, 'd')
		s.Equal("abc", string(b))
		s.Equal("abcd", string(b2))
	}
}
// unsafeConvertBytes performs the conversion in a separate function so the
// source slice can become garbage-collectable once this frame returns.
func unsafeConvertBytes() string {
	src := []byte("abc")
	return UnsafeBytesToStr(src)
}
// TestUnsafeBytesToStr checks that the zero-copy []byte->string conversion
// survives a garbage-collection cycle with its contents intact.
func (s *StringSuite) TestUnsafeBytesToStr() {
	// we convert in other function to trigger GC. We want to check that
	// the underlying array in []bytes is accessible after GC will finish swapping.
	for i := 0; i < 5; i++ {
		str := unsafeConvertBytes()
		runtime.GC()
		// Give the GC a moment to complete before reading the string.
		<-time.NewTimer(2 * time.Millisecond).C
		s.Equal("abc", str)
	}
}
// BenchmarkUnsafeStrToBytes measures the zero-copy string->[]byte conversion
// on varying short inputs.
func BenchmarkUnsafeStrToBytes(b *testing.B) {
	for n := 0; n < b.N; n++ {
		UnsafeStrToBytes(strconv.Itoa(n))
	}
}

View File

@ -5,6 +5,17 @@ import (
"sort"
)
type (
Pair struct {
Key []byte
Value []byte
}
Pairs struct {
Pairs []Pair
}
)
func (kvs Pairs) Len() int { return len(kvs.Pairs) }
func (kvs Pairs) Less(i, j int) bool {
switch bytes.Compare(kvs.Pairs[i].Key, kvs.Pairs[j].Key) {

View File

@ -1,559 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: cosmos/store/internal/kv/v1beta1/kv.proto
package kv
import (
fmt "fmt"
_ "github.com/cosmos/gogoproto/gogoproto"
proto "github.com/cosmos/gogoproto/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// NOTE(review): generated by protoc-gen-gogo from
// cosmos/store/internal/kv/v1beta1/kv.proto. Do not hand-edit; change the
// .proto and regenerate instead.

// Pairs defines a repeated slice of Pair objects.
type Pairs struct {
	Pairs []Pair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs"`
}

func (m *Pairs) Reset()         { *m = Pairs{} }
func (m *Pairs) String() string { return proto.CompactTextString(m) }
func (*Pairs) ProtoMessage()    {}
func (*Pairs) Descriptor() ([]byte, []int) {
	return fileDescriptor_534782c4083e056d, []int{0}
}
func (m *Pairs) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Pairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Pairs.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Pairs) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Pairs.Merge(m, src)
}
func (m *Pairs) XXX_Size() int {
	return m.Size()
}
func (m *Pairs) XXX_DiscardUnknown() {
	xxx_messageInfo_Pairs.DiscardUnknown(m)
}

var xxx_messageInfo_Pairs proto.InternalMessageInfo

// GetPairs returns the pairs, or nil for a nil receiver.
func (m *Pairs) GetPairs() []Pair {
	if m != nil {
		return m.Pairs
	}
	return nil
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.

// Pair defines a key/value bytes tuple.
type Pair struct {
	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *Pair) Reset()         { *m = Pair{} }
func (m *Pair) String() string { return proto.CompactTextString(m) }
func (*Pair) ProtoMessage()    {}
func (*Pair) Descriptor() ([]byte, []int) {
	return fileDescriptor_534782c4083e056d, []int{1}
}
func (m *Pair) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_Pair.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *Pair) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Pair.Merge(m, src)
}
func (m *Pair) XXX_Size() int {
	return m.Size()
}
func (m *Pair) XXX_DiscardUnknown() {
	xxx_messageInfo_Pair.DiscardUnknown(m)
}

var xxx_messageInfo_Pair proto.InternalMessageInfo

// GetKey returns the key, or nil for a nil receiver.
func (m *Pair) GetKey() []byte {
	if m != nil {
		return m.Key
	}
	return nil
}

// GetValue returns the value, or nil for a nil receiver.
func (m *Pair) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}
func init() {
proto.RegisterType((*Pairs)(nil), "cosmos.store.internal.kv.v1beta1.Pairs")
proto.RegisterType((*Pair)(nil), "cosmos.store.internal.kv.v1beta1.Pair")
}
func init() {
proto.RegisterFile("cosmos/store/internal/kv/v1beta1/kv.proto", fileDescriptor_534782c4083e056d)
}
var fileDescriptor_534782c4083e056d = []byte{
// 217 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce,
0xcd, 0x2f, 0xd6, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b,
0xcc, 0xd1, 0xcf, 0x2e, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0x2e, 0xd3,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x80, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0x83, 0x29, 0xd5,
0xcb, 0x2e, 0xd3, 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd6, 0x07, 0xb1,
0x20, 0xfa, 0x94, 0xbc, 0xb9, 0x58, 0x03, 0x12, 0x33, 0x8b, 0x8a, 0x85, 0x9c, 0xb8, 0x58, 0x0b,
0x40, 0x0c, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x35, 0x3d, 0x42, 0x06, 0xea, 0x81, 0xf4,
0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd1, 0xaa, 0xa4, 0xc7, 0xc5, 0x02, 0x12, 0x14,
0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85,
0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xc5,
0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c,
0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x41, 0x6c, 0x2f, 0x4e, 0xc9,
0xd6, 0xcb, 0xcc, 0xc7, 0xf4, 0x7f, 0x12, 0x1b, 0xd8, 0xf5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
0xff, 0x5d, 0xad, 0x97, 0xdd, 0x22, 0x01, 0x00, 0x00,
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
// Marshal allocates an exactly-sized buffer and fills it back-to-front via
// MarshalToSizedBuffer (the gogoproto convention).
func (m *Pairs) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Pairs) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message backwards from the end of dAtA,
// returning the number of bytes written.
func (m *Pairs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Pairs) > 0 {
		// Elements are emitted in reverse so the wire order is preserved.
		for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintKv(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
func (m *Pair) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Pair) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes Value (field 2) then Key (field 1) backwards
// from the end of dAtA, so the forward wire order is Key, Value.
func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12
	}
	if len(m.Key) > 0 {
		i -= len(m.Key)
		copy(dAtA[i:], m.Key)
		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

// encodeVarintKv writes v as a protobuf varint ending just before offset and
// returns the position of its first byte.
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
	offset -= sovKv(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
// Size returns the encoded byte size of the message (tag + length + payload
// per field).
func (m *Pairs) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Pairs) > 0 {
		for _, e := range m.Pairs {
			l = e.Size()
			n += 1 + l + sovKv(uint64(l))
		}
	}
	return n
}

func (m *Pair) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Key)
	if l > 0 {
		n += 1 + l + sovKv(uint64(l))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovKv(uint64(l))
	}
	return n
}

// sovKv returns the varint-encoded size of x in bytes.
func sovKv(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

// sozKv returns the zigzag-varint-encoded size of x in bytes.
func sozKv(x uint64) (n int) {
	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
// Unmarshal decodes dAtA into m, appending one Pair per field-1 occurrence and
// skipping unknown fields.
func (m *Pairs) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKv
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Pairs: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Pairs: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
			}
			// Length-delimited sub-message: read the length, bounds-check, recurse.
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthKv
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthKv
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Pairs = append(m.Pairs, Pair{})
			if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it.
			iNdEx = preIndex
			skippy, err := skipKv(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthKv
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
// Unmarshal decodes dAtA into m: field 1 is Key, field 2 is Value, anything
// else is skipped.
func (m *Pair) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag (varint).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowKv
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Pair: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKv
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthKv
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
			if m.Key == nil {
				m.Key = []byte{}
			}
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowKv
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthKv
			}
			postIndex := iNdEx + byteLen
			if postIndex < 0 {
				return ErrInvalidLengthKv
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
			if m.Value == nil {
				m.Value = []byte{}
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it.
			iNdEx = preIndex
			skippy, err := skipKv(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLengthKv
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// NOTE(review): generated code — do not hand-edit; regenerate from kv.proto.
// skipKv returns the number of bytes occupied by the next field in dAtA,
// handling all wire types including (deprecated) nested groups.
func skipKv(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowKv
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint: consume bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowKv
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed64.
			iNdEx += 8
		case 2:
			// Length-delimited: read the length varint, then skip the payload.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowKv
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthKv
			}
			iNdEx += length
		case 3:
			// Start group.
			depth++
		case 4:
			// End group.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupKv
			}
			depth--
		case 5:
			// Fixed32.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthKv
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

// Sentinel errors returned by the generated decoding helpers above.
var (
	ErrInvalidLengthKv        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowKv          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group")
)

View File

@ -7,8 +7,8 @@ import (
"github.com/cometbft/cometbft/crypto/tmhash"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/internal/tree"
"cosmossdk.io/store/v2/internal/kv"
"cosmossdk.io/store/v2/internal/tree"
)
// merkleMap defines a merkle-ized tree from a map. Leave values are treated as

View File

@ -6,7 +6,7 @@ import (
ics23 "github.com/cosmos/ics23/go"
sdkmaps "cosmossdk.io/store/internal/maps"
"cosmossdk.io/store/v2/internal/maps"
)
var (
@ -93,7 +93,7 @@ func createExistenceProof(data map[string][]byte, key []byte) (*ics23.ExistenceP
return nil, errors.New("cannot make existence proof if key is not in map")
}
_, proofs, _ := sdkmaps.ProofsFromMap(data)
_, proofs, _ := maps.ProofsFromMap(data)
proof := proofs[string(key)]
if proof == nil {
return nil, errors.New("returned no proof for key")

View File

@ -7,7 +7,7 @@ import (
"golang.org/x/exp/maps"
"cosmossdk.io/math/unsafe"
sdkmaps "cosmossdk.io/store/internal/maps"
internalmaps "cosmossdk.io/store/v2/internal/maps"
)
// SimpleResult contains a merkle.SimpleProof along with all data needed to build the confio/proof
@ -23,7 +23,7 @@ type SimpleResult struct {
// returns a range proof and the root hash of the tree
func GenerateRangeProof(size int, loc Where) *SimpleResult {
data := BuildMap(size)
root, proofs, allkeys := sdkmaps.ProofsFromMap(data)
root, proofs, allkeys := internalmaps.ProofsFromMap(data)
key := GetKey(allkeys, loc)
proof := proofs[key]
@ -53,7 +53,7 @@ func SortedKeys(data map[string][]byte) []string {
}
func CalcRoot(data map[string][]byte) []byte {
root, _, _ := sdkmaps.ProofsFromMap(data)
root, _, _ := internalmaps.ProofsFromMap(data)
return root
}

38
store/iterator.go Normal file
View File

@ -0,0 +1,38 @@
package store
// Iterator defines an interface for iterating over a domain of key/value pairs.
type Iterator interface {
// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
Domain() ([]byte, []byte)
// Valid returns if the iterator is currently valid.
Valid() bool
// Error returns any accumulated error. Error() should be called after all
// key/value pairs have been exhausted, i.e. after Next() has returned false.
Error() error
// Key returns the key of the current key/value pair, or nil if done.
Key() []byte
// Value returns the value of the current key/value pair, or nil if done.
Value() []byte
// Next moves the iterator to the next key/value pair.
Next() bool
// Close releases associated resources. It should NOT be idempotent. It must
// only be called once and any call after may panic.
Close()
}
// IteratorCreator defines an interface for creating forward and reverse iterators.
type IteratorCreator interface {
// Iterator creates a new iterator for the given store name and domain, where
// domain is defined by [start, end). Note, both start and end are optional.
Iterator(storeKey string, start, end []byte) (Iterator, error)
// ReverseIterator creates a new reverse iterator for the given store name
// and domain, where domain is defined by [start, end). Note, both start and
// end are optional.
ReverseIterator(storeKey string, start, end []byte) (Iterator, error)
}

View File

@ -1,142 +0,0 @@
package listenkv
import (
"io"
"cosmossdk.io/store/types"
)
var _ types.KVStore = &Store{}

// Store implements the KVStore interface with listening enabled.
// Operations are traced on each core KVStore call and written to any of the
// underlying listeners with the proper key and operation permissions
type Store struct {
	parent         types.KVStore         // underlying store every call delegates to
	listener       *types.MemoryListener // receives OnWrite notifications for Set/Delete
	parentStoreKey types.StoreKey        // identifies this store in emitted notifications
}

// NewStore returns a listening Store that wraps parent, reporting all writes
// to listener under parentStoreKey.
func NewStore(parent types.KVStore, parentStoreKey types.StoreKey, listener *types.MemoryListener) *Store {
	return &Store{parent: parent, listener: listener, parentStoreKey: parentStoreKey}
}
// Get implements the KVStore interface. It delegates directly to the parent
// KVStore; reads are not reported to the listener (only writes are).
func (s *Store) Get(key []byte) []byte {
	value := s.parent.Get(key)
	return value
}
// Set implements the KVStore interface. It validates the key, delegates the
// write to the parent KVStore, and then reports the write to the listener.
func (s *Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	s.parent.Set(key, value)
	s.listener.OnWrite(s.parentStoreKey, key, value, false)
}
// Delete implements the KVStore interface. It delegates the Delete call to the
// parent KVStore and then reports the deletion (delete=true, nil value) to the
// listener.
func (s *Store) Delete(key []byte) {
	s.parent.Delete(key)
	s.listener.OnWrite(s.parentStoreKey, key, nil, true)
}
// Has implements the KVStore interface. It delegates the Has call to the
// parent KVStore; existence checks are not reported to the listener.
func (s *Store) Has(key []byte) bool {
	return s.parent.Has(key)
}

// Iterator implements the KVStore interface. It delegates the Iterator call
// the to the parent KVStore.
func (s *Store) Iterator(start, end []byte) types.Iterator {
	return s.iterator(start, end, true)
}

// ReverseIterator implements the KVStore interface. It delegates the
// ReverseIterator call the to the parent KVStore.
func (s *Store) ReverseIterator(start, end []byte) types.Iterator {
	return s.iterator(start, end, false)
}
// iterator builds a forward or reverse iterator over the parent KVStore and
// wraps it so it satisfies the listening iterator type.
func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator {
	if ascending {
		return newTraceIterator(s.parent.Iterator(start, end), s.listener)
	}
	return newTraceIterator(s.parent.ReverseIterator(start, end), s.listener)
}
// listenIterator wraps a parent iterator, delegating every call to it.
type listenIterator struct {
	parent   types.Iterator
	listener *types.MemoryListener // held but not invoked by any iterator method
}

// newTraceIterator wraps parent in a listenIterator.
// NOTE(review): the name says "trace" but this constructs a listenIterator —
// presumably carried over from tracekv; consider renaming on a future pass.
func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) types.Iterator {
	return &listenIterator{parent: parent, listener: listener}
}
// Domain implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Domain() (start, end []byte) {
	return li.parent.Domain()
}

// Valid implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Valid() bool {
	return li.parent.Valid()
}

// Next implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Next() {
	li.parent.Next()
}

// Key implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Key() []byte {
	key := li.parent.Key()
	return key
}

// Value implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Value() []byte {
	value := li.parent.Value()
	return value
}

// Close implements the Iterator interface by delegating to the parent.
func (li *listenIterator) Close() error {
	return li.parent.Close()
}

// Error delegates the Error call to the parent iterator.
func (li *listenIterator) Error() error {
	return li.parent.Error()
}
// GetStoreType implements the KVStore interface. It returns the underlying
// KVStore type.
func (s *Store) GetStoreType() types.StoreType {
	return s.parent.GetStoreType()
}

// CacheWrap implements the KVStore interface. It panics as a Store
// cannot be cache wrapped.
func (s *Store) CacheWrap() types.CacheWrap {
	panic("cannot CacheWrap a ListenKVStore")
}

// CacheWrapWithTrace implements the KVStore interface. It panics as a
// Store cannot be cache wrapped.
func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
	panic("cannot CacheWrapWithTrace a ListenKVStore")
}

View File

@ -1,281 +0,0 @@
package listenkv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/listenkv"
"cosmossdk.io/store/prefix"
"cosmossdk.io/store/types"
)
// Helpers to build deterministic, zero-padded key/value fixtures.
func bz(s string) []byte { return []byte(s) }

func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }

// Fixture pairs shared by all tests in this file.
var kvPairs = []kv.Pair{
	{Key: keyFmt(1), Value: valFmt(1)},
	{Key: keyFmt(2), Value: valFmt(2)},
	{Key: keyFmt(3), Value: valFmt(3)},
}

var testStoreKey = types.NewKVStoreKey("listen_test")

// newListenKVStore returns a listening store pre-populated with kvPairs.
func newListenKVStore(listener *types.MemoryListener) *listenkv.Store {
	store := newEmptyListenKVStore(listener)

	for _, kvPair := range kvPairs {
		store.Set(kvPair.Key, kvPair.Value)
	}

	return store
}

// newEmptyListenKVStore returns a listening store over a fresh in-memory DB.
func newEmptyListenKVStore(listener *types.MemoryListener) *listenkv.Store {
	memDB := dbadapter.Store{DB: dbm.NewMemDB()}
	return listenkv.NewStore(memDB, testStoreKey, listener)
}
// TestListenKVStoreGet checks that Get returns stored values and nil for
// missing keys.
func TestListenKVStoreGet(t *testing.T) {
	testCases := []struct {
		key           []byte
		expectedValue []byte
	}{
		{
			key:           kvPairs[0].Key,
			expectedValue: kvPairs[0].Value,
		},
		{
			key:           []byte("does-not-exist"),
			expectedValue: nil,
		},
	}

	for _, tc := range testCases {
		listener := types.NewMemoryListener()
		store := newListenKVStore(listener)
		value := store.Get(tc.key)
		require.Equal(t, tc.expectedValue, value)
	}
}
// TestListenKVStoreSet checks that each Set emits exactly the expected
// StoreKVPair to the listener, and that empty/nil keys panic.
func TestListenKVStoreSet(t *testing.T) {
	testCases := []struct {
		key         []byte
		value       []byte
		expectedOut *types.StoreKVPair
	}{
		{
			key:   kvPairs[0].Key,
			value: kvPairs[0].Value,
			expectedOut: &types.StoreKVPair{
				Key:      kvPairs[0].Key,
				Value:    kvPairs[0].Value,
				StoreKey: testStoreKey.Name(),
				Delete:   false,
			},
		},
		{
			key:   kvPairs[1].Key,
			value: kvPairs[1].Value,
			expectedOut: &types.StoreKVPair{
				Key:      kvPairs[1].Key,
				Value:    kvPairs[1].Value,
				StoreKey: testStoreKey.Name(),
				Delete:   false,
			},
		},
		{
			key:   kvPairs[2].Key,
			value: kvPairs[2].Value,
			expectedOut: &types.StoreKVPair{
				Key:      kvPairs[2].Key,
				Value:    kvPairs[2].Value,
				StoreKey: testStoreKey.Name(),
				Delete:   false,
			},
		},
	}

	for _, tc := range testCases {
		listener := types.NewMemoryListener()
		store := newEmptyListenKVStore(listener)
		store.Set(tc.key, tc.value)
		storeKVPair := listener.PopStateCache()[0]
		require.Equal(t, tc.expectedOut, storeKVPair)
	}

	// Invalid keys must be rejected by AssertValidKey before the write.
	listener := types.NewMemoryListener()
	store := newEmptyListenKVStore(listener)
	require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
	require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
}
// TestListenKVStoreDelete checks that Delete emits a StoreKVPair with
// Delete=true and a nil value as the listener's final cached entry.
func TestListenKVStoreDelete(t *testing.T) {
	testCases := []struct {
		key         []byte
		expectedOut *types.StoreKVPair
	}{
		{
			key: kvPairs[0].Key,
			expectedOut: &types.StoreKVPair{
				Key:      kvPairs[0].Key,
				Value:    nil,
				StoreKey: testStoreKey.Name(),
				Delete:   true,
			},
		},
	}

	for _, tc := range testCases {
		listener := types.NewMemoryListener()
		store := newListenKVStore(listener)
		store.Delete(tc.key)

		// The cache also holds the three initial Sets; the delete is last.
		cache := listener.PopStateCache()
		require.NotEmpty(t, cache)
		storeKVPair := cache[len(cache)-1]
		require.Equal(t, tc.expectedOut, storeKVPair)
	}
}
// TestListenKVStoreHas checks that Has reports existing keys.
func TestListenKVStoreHas(t *testing.T) {
	testCases := []struct {
		key      []byte
		expected bool
	}{
		{
			key:      kvPairs[0].Key,
			expected: true,
		},
	}

	for _, tc := range testCases {
		listener := types.NewMemoryListener()
		store := newListenKVStore(listener)
		ok := store.Has(tc.key)
		require.Equal(t, tc.expected, ok)
	}
}
// TestListenKVStoreIterator checks forward iteration over the listening store:
// unbounded domain, keys/values in ascending order, and panic on Next past the
// end.
//
// Renamed from TestTestListenKVStoreIterator: the duplicated "Test" prefix was
// a typo; the rename does not change what `go test` runs.
func TestListenKVStoreIterator(t *testing.T) {
	listener := types.NewMemoryListener()
	store := newListenKVStore(listener)
	iterator := store.Iterator(nil, nil)

	s, e := iterator.Domain()
	require.Equal(t, []byte(nil), s)
	require.Equal(t, []byte(nil), e)

	testCases := []struct {
		expectedKey   []byte
		expectedValue []byte
	}{
		{
			expectedKey:   kvPairs[0].Key,
			expectedValue: kvPairs[0].Value,
		},
		{
			expectedKey:   kvPairs[1].Key,
			expectedValue: kvPairs[1].Value,
		},
		{
			expectedKey:   kvPairs[2].Key,
			expectedValue: kvPairs[2].Value,
		},
	}

	for _, tc := range testCases {
		ka := iterator.Key()
		require.Equal(t, tc.expectedKey, ka)

		va := iterator.Value()
		require.Equal(t, tc.expectedValue, va)

		iterator.Next()
	}

	require.False(t, iterator.Valid())
	require.Panics(t, iterator.Next)
	require.NoError(t, iterator.Close())
}
// TestListenKVStoreReverseIterator checks reverse iteration over the listening
// store: unbounded domain, keys/values in descending order, and panic on Next
// past the end.
//
// Renamed from TestTestListenKVStoreReverseIterator: the duplicated "Test"
// prefix was a typo; the rename does not change what `go test` runs.
func TestListenKVStoreReverseIterator(t *testing.T) {
	listener := types.NewMemoryListener()
	store := newListenKVStore(listener)
	iterator := store.ReverseIterator(nil, nil)

	s, e := iterator.Domain()
	require.Equal(t, []byte(nil), s)
	require.Equal(t, []byte(nil), e)

	testCases := []struct {
		expectedKey   []byte
		expectedValue []byte
	}{
		{
			expectedKey:   kvPairs[2].Key,
			expectedValue: kvPairs[2].Value,
		},
		{
			expectedKey:   kvPairs[1].Key,
			expectedValue: kvPairs[1].Value,
		},
		{
			expectedKey:   kvPairs[0].Key,
			expectedValue: kvPairs[0].Value,
		},
	}

	for _, tc := range testCases {
		ka := iterator.Key()
		require.Equal(t, tc.expectedKey, ka)

		va := iterator.Value()
		require.Equal(t, tc.expectedValue, va)

		iterator.Next()
	}

	require.False(t, iterator.Valid())
	require.Panics(t, iterator.Next)
	require.NoError(t, iterator.Close())
}
// TestListenKVStorePrefix checks that a listening store can be prefix-wrapped.
func TestListenKVStorePrefix(t *testing.T) {
	store := newEmptyListenKVStore(nil)
	pStore := prefix.NewStore(store, []byte("listen_prefix"))
	require.IsType(t, prefix.Store{}, pStore)
}

// TestListenKVStoreGetStoreType checks that GetStoreType is delegated to the
// underlying dbadapter store.
func TestListenKVStoreGetStoreType(t *testing.T) {
	memDB := dbadapter.Store{DB: dbm.NewMemDB()}
	store := newEmptyListenKVStore(nil)
	require.Equal(t, memDB.GetStoreType(), store.GetStoreType())
}

// TestListenKVStoreCacheWrap checks that cache-wrapping a listening store panics.
func TestListenKVStoreCacheWrap(t *testing.T) {
	store := newEmptyListenKVStore(nil)
	require.Panics(t, func() { store.CacheWrap() })
}

// TestListenKVStoreCacheWrapWithTrace checks that trace-cache-wrapping a
// listening store panics.
func TestListenKVStoreCacheWrapWithTrace(t *testing.T) {
	store := newEmptyListenKVStore(nil)
	require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}

View File

@ -1,53 +0,0 @@
package mem_test
import (
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/mem"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
// TestStore exercises basic CRUD on the in-memory store and checks that both
// cache-wrap variants return cachekv stores.
func TestStore(t *testing.T) {
	db := mem.NewStore()
	require.Equal(t, types.StoreTypeMemory, db.GetStoreType())

	key, value := []byte("key"), []byte("value")

	require.Nil(t, db.Get(key))
	db.Set(key, value)
	require.Equal(t, value, db.Get(key))

	newValue := []byte("newValue")
	db.Set(key, newValue)
	require.Equal(t, newValue, db.Get(key))

	db.Delete(key)
	require.Nil(t, db.Get(key))

	cacheWrapper := db.CacheWrap()
	require.IsType(t, &cachekv.Store{}, cacheWrapper)

	cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil)
	require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}
// TestCommit verifies that committing an in-memory store is a no-op: the
// commit ID stays zero while previously written data remains readable.
func TestCommit(t *testing.T) {
	store := mem.NewStore()
	k, v := []byte("key"), []byte("value")
	store.Set(k, v)

	commitID := store.Commit()
	require.True(t, commitID.IsZero())
	require.True(t, store.LastCommitID().IsZero())
	require.Equal(t, v, store.Get(k))
}
// TestStorePruningOptions verifies that pruning is undefined for the mem
// store: GetPruning always reports PruningUndefined. (Renamed from
// TestStorePrunningOptions to fix the spelling.)
func TestStorePruningOptions(t *testing.T) {
	// this is a no-op
	db := mem.NewStore()
	require.Equal(t, pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined), db.GetPruning())
}

View File

@ -1,62 +0,0 @@
package mem
import (
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
var (
	// compile-time assertions that Store satisfies both interfaces
	_ types.KVStore   = (*Store)(nil)
	_ types.Committer = (*Store)(nil)
)

// Store implements an in-memory only KVStore. Entries are persisted between
// commits and thus between blocks. State in Memory store is not committed as part of app state but maintained privately by each node
type Store struct {
	dbadapter.Store
}

// NewStore constructs a Store backed by a fresh in-memory cosmos-db instance.
func NewStore() *Store {
	return NewStoreWithDB(dbm.NewMemDB())
}

// NewStoreWithDB wraps the provided in-memory DB in a Store.
func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer // Concrete return type is fine here.
	return &Store{Store: dbadapter.Store{DB: db}}
}

// GetStoreType returns the Store's type.
func (s Store) GetStoreType() types.StoreType {
	return types.StoreTypeMemory
}

// CacheWrap branches the underlying store.
func (s Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace implements KVStore.
func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

// Commit performs a no-op as entries are persistent between commitments.
func (s *Store) Commit() (id types.CommitID) { return }

// SetPruning is a no-op; pruning options cannot be set on this store.
func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {}

// GetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (s *Store) GetPruning() pruningtypes.PruningOptions {
	return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
}

// LastCommitID returns the zero CommitID: the store keeps no commit history.
func (s Store) LastCommitID() (id types.CommitID) { return }

// WorkingHash returns nil: an in-memory store has no working hash.
func (s Store) WorkingHash() (hash []byte) { return }

8
store/memkv/README.md Normal file
View File

@ -0,0 +1,8 @@
# memkv
The `memkv.Store` implementation defines an in-memory `KVStore`, which is internally
backed by a thread-safe BTree. The `memkv.Store` does not provide any branching
functionality and should be used as an ephemeral store, typically reset between
blocks. A `memkv.Store` contains no reference to a parent store, but can be used
as a parent store for other stores. The `memkv.Store` can be useful for testing
purposes and where state persistence is not required or should be ephemeral.

120
store/memkv/iterator.go Normal file
View File

@ -0,0 +1,120 @@
package memkv
import (
"bytes"
"github.com/tidwall/btree"
"golang.org/x/exp/slices"
"cosmossdk.io/store/v2"
)
var _ store.Iterator = (*iterator)(nil)

// iterator walks a BTree of KVPairs over the half-open domain [start, end),
// either forward or (when reverse is true) backward. Once it steps outside
// the domain it becomes permanently invalid.
type iterator struct {
	treeItr btree.IterG[store.KVPair]
	start   []byte
	end     []byte
	reverse bool
	valid   bool
}
// newIterator positions a BTree iterator at the first in-domain entry for the
// requested direction. It panics on a non-nil but empty start or end key, or
// when start sorts after end. The returned iterator may already be invalid if
// the domain contains no entries.
func newIterator(tree *btree.BTreeG[store.KVPair], start, end []byte, reverse bool) store.Iterator {
	// reject non-nil but zero-length domain bounds
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		panic(store.ErrKeyEmpty)
	}
	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
		panic(store.ErrStartAfterEnd)
	}

	iter := tree.Iter()

	var valid bool
	if reverse {
		if end != nil {
			// Seek lands on the first entry >= end; since end is exclusive,
			// step back one entry, or take the last entry when nothing is
			// >= end.
			valid = iter.Seek(store.KVPair{Key: end, Value: nil})
			if !valid {
				valid = iter.Last()
			} else {
				valid = iter.Prev() // end is exclusive
			}
		} else {
			valid = iter.Last()
		}
	} else {
		if start != nil {
			// first entry >= start
			valid = iter.Seek(store.KVPair{Key: start, Value: nil})
		} else {
			valid = iter.First()
		}
	}

	itr := &iterator{
		treeItr: iter,
		start:   start,
		end:     end,
		reverse: reverse,
		valid:   valid,
	}

	// the initial position may still violate the bound for the direction of
	// travel (end for forward, start for reverse)
	if itr.valid {
		itr.valid = itr.keyInRange(itr.Key())
	}

	return itr
}
// Domain returns the domain of the iterator. The caller must not modify the
// return values.
func (itr *iterator) Domain() ([]byte, []byte) {
	return itr.start, itr.end
}

// Valid reports whether the iterator is positioned on an in-domain entry.
func (itr *iterator) Valid() bool {
	return itr.valid
}

// Key returns a copy of the current entry's key.
func (itr *iterator) Key() []byte {
	return slices.Clone(itr.treeItr.Item().Key)
}

// Value returns a copy of the current entry's value.
func (itr *iterator) Value() []byte {
	return slices.Clone(itr.treeItr.Item().Value)
}
// Next advances the iterator one entry in its direction of travel and
// reports whether it is still positioned inside the domain. Calling Next on
// an already-invalid iterator is a no-op returning false.
func (itr *iterator) Next() bool {
	if !itr.valid {
		return false
	}

	// pick the tree-level step matching the direction of travel
	advance := itr.treeItr.Next
	if itr.reverse {
		advance = itr.treeItr.Prev
	}

	if itr.valid = advance(); itr.valid {
		itr.valid = itr.keyInRange(itr.Key())
	}

	return itr.valid
}
// Close releases the underlying BTree iterator.
func (itr *iterator) Close() {
	itr.treeItr.Release()
}

// Error always returns nil; in-memory tree iteration cannot fail.
func (itr *iterator) Error() error {
	return nil
}
// keyInRange reports whether key still lies inside the iterator's half-open
// domain with respect to its direction of travel: forward iteration is
// bounded by end (exclusive), reverse iteration by start (inclusive).
func (itr *iterator) keyInRange(key []byte) bool {
	if itr.reverse {
		return itr.start == nil || bytes.Compare(key, itr.start) >= 0
	}
	return itr.end == nil || bytes.Compare(key, itr.end) < 0
}

104
store/memkv/store.go Normal file
View File

@ -0,0 +1,104 @@
package memkv
import (
"bytes"
"github.com/tidwall/btree"
"cosmossdk.io/store/v2"
)
const (
	// degree defines the approximate number of items and children per B-tree node.
	degree = 32
)

// compile-time assertion that Store satisfies the KVStore interface
var _ store.KVStore = (*Store)(nil)

// Store defines an in-memory KVStore backed by a BTree for storage, indexing,
// and iteration. Note, the store is ephemeral and does not support commitment.
// If using the store between blocks or commitments, the caller must ensure to
// either create a new store or call Reset() on the existing store.
type Store struct {
	storeKey string
	tree     *btree.BTreeG[store.KVPair]
}
// New returns a KVStore for the given store key, backed by an empty BTree
// ordered bytewise on the pair keys.
func New(storeKey string) store.KVStore {
	return &Store{
		storeKey: storeKey,
		tree: btree.NewBTreeGOptions(
			// strict bytewise less-than ordering on keys; `< 0` is the
			// documented bytes.Compare idiom (was `<= -1`, which leaned on
			// the exact -1/0/1 return contract).
			func(a, b store.KVPair) bool { return bytes.Compare(a.Key, b.Key) < 0 },
			btree.Options{
				Degree:  degree,
				NoLocks: false,
			}),
	}
}
// GetStoreKey returns the store key this store was created with.
func (s *Store) GetStoreKey() string {
	return s.storeKey
}

// GetStoreType returns the Store's type (memory).
func (s *Store) GetStoreType() store.StoreType {
	return store.StoreTypeMem
}
// Get returns the value stored under key, or nil when the key is absent or
// holds a nil tombstone left by Delete.
func (s *Store) Get(key []byte) []byte {
	store.AssertValidKey(key)

	pair, found := s.tree.Get(store.KVPair{Key: key, StoreKey: s.storeKey})
	if !found {
		return nil
	}
	return pair.Value
}
// Has reports whether key holds a non-nil value. It consults the tree
// directly instead of delegating to Get, which would re-run AssertValidKey a
// second time for no benefit.
func (s *Store) Has(key []byte) bool {
	store.AssertValidKey(key)

	kvPair, ok := s.tree.Get(store.KVPair{Key: key, StoreKey: s.storeKey})
	return ok && kvPair.Value != nil
}
// Set stores value under key, overwriting any previous entry.
func (s *Store) Set(key, value []byte) {
	store.AssertValidKey(key)
	store.AssertValidValue(value)
	s.tree.Set(store.KVPair{Key: key, Value: value, StoreKey: s.storeKey})
}

// Delete records a nil-valued tombstone for key; the entry stays in the tree
// so the deletion is reflected by GetChangeset.
func (s *Store) Delete(key []byte) {
	store.AssertValidKey(key)
	s.tree.Set(store.KVPair{Key: key, StoreKey: s.storeKey, Value: nil})
}
// GetChangeset converts the store's current contents, including nil-valued
// tombstones left by Delete, into a Changeset tagged with the store key.
func (s *Store) GetChangeset() *store.Changeset {
	itr := s.Iterator(nil, nil)
	defer itr.Close()

	var kvPairs []store.KVPair
	for ; itr.Valid(); itr.Next() {
		kvPairs = append(kvPairs, store.KVPair{
			StoreKey: s.storeKey,
			Key:      itr.Key(),
			Value:    itr.Value(),
		})
	}

	return store.NewChangeset(kvPairs...)
}
// Reset clears all entries. The error return is always nil; it exists to
// satisfy the store interface.
func (s *Store) Reset() error {
	s.tree.Clear()
	return nil
}

// Iterator returns a forward iterator over the domain [start, end).
func (s *Store) Iterator(start, end []byte) store.Iterator {
	return newIterator(s.tree, start, end, false)
}

// ReverseIterator returns a backward iterator over the domain [start, end).
func (s *Store) ReverseIterator(start, end []byte) store.Iterator {
	return newIterator(s.tree, start, end, true)
}

257
store/memkv/store_test.go Normal file
View File

@ -0,0 +1,257 @@
package memkv_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/suite"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/memkv"
)
const storeKey = "storeKey"

// StoreTestSuite exercises the memkv KVStore implementation; SetupTest gives
// every test a fresh store.
type StoreTestSuite struct {
	suite.Suite

	kvStore store.KVStore
}

func TestStorageTestSuite(t *testing.T) {
	suite.Run(t, &StoreTestSuite{})
}

func (s *StoreTestSuite) SetupTest() {
	s.kvStore = memkv.New(storeKey)
}

func (s *StoreTestSuite) TestGetStoreType() {
	s.Require().Equal(store.StoreTypeMem, s.kvStore.GetStoreType())
}
func (s *StoreTestSuite) TestGetChangeset() {
	// initial store with no writes should have an empty changeset
	cs := s.kvStore.GetChangeset()
	s.Require().Zero(cs.Size())

	// perform some writes
	s.kvStore.Set([]byte("key000"), []byte("updated_val000"))
	s.kvStore.Delete([]byte("key001"))

	// both the write and the delete tombstone are reflected
	cs = s.kvStore.GetChangeset()
	s.Require().Equal(cs.Size(), 2)
}

func (s *StoreTestSuite) TestReset() {
	// resetting an empty store succeeds and leaves an empty changeset
	s.Require().NoError(s.kvStore.Reset())

	cs := s.kvStore.GetChangeset()
	s.Require().Zero(cs.Size())
}
func (s *StoreTestSuite) TestCRUD() {
	// read of a missing key
	bz := s.kvStore.Get([]byte("key000"))
	s.Require().Nil(bz)
	s.Require().False(s.kvStore.Has([]byte("key000")))

	// create and read back
	s.kvStore.Set([]byte("key000"), []byte("val000"))
	bz = s.kvStore.Get([]byte("key000"))
	s.Require().Equal([]byte("val000"), bz)
	s.Require().True(s.kvStore.Has([]byte("key000")))

	// delete restores the missing-key behavior
	s.kvStore.Delete([]byte("key000"))
	bz = s.kvStore.Get([]byte("key000"))
	s.Require().Nil(bz)
	s.Require().False(s.kvStore.Has([]byte("key000")))
}
// TestIterator seeds 100 sequential key/value pairs and checks forward
// iteration over each domain shape: start-only, end-only, bounded, and fully
// open. Each subtest also verifies that advancing past the domain leaves the
// iterator invalid while Error stays nil.
func (s *StoreTestSuite) TestIterator() {
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099

		s.kvStore.Set([]byte(key), []byte(val))
	}

	// iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.Iterator([]byte("key000"), nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.Iterator(nil, []byte("key100"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.Iterator([]byte("key000"), []byte("key050"))
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		// end is exclusive: key000..key049
		s.Require().Equal(50, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.Iterator(nil, nil)
		defer itr.Close()

		var i, count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i++
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}
// TestReverseIterator seeds 100 sequential key/value pairs and checks
// backward iteration over each domain shape: start-only, end-only, bounded,
// and fully open. Each subtest also verifies that advancing past the domain
// leaves the iterator invalid while Error stays nil. (The end_only subtest
// previously asserted Valid before Next, unlike every sibling subtest; the
// order is now consistent.)
func (s *StoreTestSuite) TestReverseIterator() {
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099

		s.kvStore.Set([]byte(key), []byte(val))
	}

	// reverse iterator without an end domain
	s.Run("start_only", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), nil)
		defer itr.Close()

		i := 99
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i--
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator without a start domain
	s.Run("end_only", func() {
		itr := s.kvStore.ReverseIterator(nil, []byte("key100"))
		defer itr.Close()

		i := 99
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i--
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator with a start and end domain
	s.Run("start_and_end", func() {
		itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050"))
		defer itr.Close()

		i := 49
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i--
			count++
		}
		// end is exclusive: key049..key000
		s.Require().Equal(50, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})

	// reverse iterator with an open domain
	s.Run("open_domain", func() {
		itr := s.kvStore.ReverseIterator(nil, nil)
		defer itr.Close()

		i := 99
		var count int
		for ; itr.Valid(); itr.Next() {
			s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key()))
			s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value())

			i--
			count++
		}
		s.Require().Equal(100, count)
		s.Require().NoError(itr.Error())

		// seek past domain, which should make the iterator invalid
		s.Require().False(itr.Next())
		s.Require().False(itr.Valid())
	})
}

View File

@ -1,56 +0,0 @@
package metrics
import (
"time"
"github.com/hashicorp/go-metrics"
)
// StoreMetrics defines the set of metrics for the store package
type StoreMetrics interface {
	MeasureSince(keys ...string)
}

var (
	// compile-time assertions that both implementations satisfy StoreMetrics
	_ StoreMetrics = Metrics{}
	_ StoreMetrics = NoOpMetrics{}
)

// Metrics defines the metrics wrapper for the store package
type Metrics struct {
	// Labels are appended to every emitted sample.
	Labels []metrics.Label
}
// NewMetrics returns a new instance of the Metrics with labels set by the
// node operator. Each element of labels must hold at least two entries:
// labels[i][0] is the label name and labels[i][1] its value.
func NewMetrics(labels [][]string) Metrics {
	gatherer := Metrics{}

	// fixed spelling of the local (was numGlobalLables)
	if numGlobalLabels := len(labels); numGlobalLabels > 0 {
		parsedGlobalLabels := make([]metrics.Label, numGlobalLabels)
		for i, gl := range labels {
			parsedGlobalLabels[i] = metrics.Label{Name: gl[0], Value: gl[1]}
		}

		gatherer.Labels = parsedGlobalLabels
	}

	return gatherer
}
// MeasureSince provides a wrapper functionality for emitting a time measure
// metric with global labels (if any).
//
// NOTE(review): start is captured immediately before the emit call, so the
// sample measures (approximately) zero elapsed time rather than the duration
// of a surrounding operation — confirm this matches the intended usage.
func (m Metrics) MeasureSince(keys ...string) {
	start := time.Now()
	metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels)
}

// NoOpMetrics is a no-op implementation of the StoreMetrics interface
type NoOpMetrics struct{}

// NewNoOpMetrics returns a new instance of the NoOpMetrics
func NewNoOpMetrics() NoOpMetrics {
	return NoOpMetrics{}
}

// MeasureSince is a no-op implementation of the StoreMetrics interface to avoid time.Now() calls
func (m NoOpMetrics) MeasureSince(keys ...string) {}

View File

@ -1,221 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/cosmos/cosmos-db (interfaces: DB)
// Package mock is a generated GoMock package.
package mock
import (
reflect "reflect"
db "github.com/cosmos/cosmos-db"
gomock "github.com/golang/mock/gomock"
)
// MockDB is a mock of DB interface.
type MockDB struct {
ctrl *gomock.Controller
recorder *MockDBMockRecorder
}
// MockDBMockRecorder is the mock recorder for MockDB.
type MockDBMockRecorder struct {
mock *MockDB
}
// NewMockDB creates a new mock instance.
func NewMockDB(ctrl *gomock.Controller) *MockDB {
mock := &MockDB{ctrl: ctrl}
mock.recorder = &MockDBMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDB) EXPECT() *MockDBMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockDB) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockDBMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close))
}
// Delete mocks base method.
func (m *MockDB) Delete(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0)
}
// DeleteSync mocks base method.
func (m *MockDB) DeleteSync(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteSync", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteSync indicates an expected call of DeleteSync.
func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0)
}
// Get mocks base method.
func (m *MockDB) Get(arg0 []byte) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0)
}
// Has mocks base method.
func (m *MockDB) Has(arg0 []byte) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Has", arg0)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Has indicates an expected call of Has.
func (mr *MockDBMockRecorder) Has(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), arg0)
}
// Iterator mocks base method.
func (m *MockDB) Iterator(arg0, arg1 []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Iterator", arg0, arg1)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Iterator indicates an expected call of Iterator.
func (mr *MockDBMockRecorder) Iterator(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), arg0, arg1)
}
// NewBatch mocks base method.
func (m *MockDB) NewBatch() db.Batch {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBatch")
ret0, _ := ret[0].(db.Batch)
return ret0
}
// NewBatch indicates an expected call of NewBatch.
func (mr *MockDBMockRecorder) NewBatch() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch))
}
// NewBatchWithSize mocks base method.
func (m *MockDB) NewBatchWithSize(arg0 int) db.Batch {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBatchWithSize", arg0)
ret0, _ := ret[0].(db.Batch)
return ret0
}
// NewBatchWithSize indicates an expected call of NewBatchWithSize.
func (mr *MockDBMockRecorder) NewBatchWithSize(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatchWithSize", reflect.TypeOf((*MockDB)(nil).NewBatchWithSize), arg0)
}
// Print mocks base method.
func (m *MockDB) Print() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Print")
ret0, _ := ret[0].(error)
return ret0
}
// Print indicates an expected call of Print.
func (mr *MockDBMockRecorder) Print() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print))
}
// ReverseIterator mocks base method.
func (m *MockDB) ReverseIterator(arg0, arg1 []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReverseIterator", arg0, arg1)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReverseIterator indicates an expected call of ReverseIterator.
func (mr *MockDBMockRecorder) ReverseIterator(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), arg0, arg1)
}
// Set mocks base method.
func (m *MockDB) Set(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Set", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Set indicates an expected call of Set.
func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1)
}
// SetSync mocks base method.
func (m *MockDB) SetSync(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetSync", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// SetSync indicates an expected call of SetSync.
func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1)
}
// Stats mocks base method.
func (m *MockDB) Stats() map[string]string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Stats")
ret0, _ := ret[0].(map[string]string)
return ret0
}
// Stats indicates an expected call of Stats.
func (mr *MockDBMockRecorder) Stats() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats))
}

View File

@ -1,207 +0,0 @@
package prefix
import (
"bytes"
"errors"
"io"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
var _ types.KVStore = Store{}

// Store is similar to cometbft/cometbft/libs/db/prefix_db: both give access
// only to a limited subset of the parent store, for convenience or safety.
type Store struct {
	parent types.KVStore
	prefix []byte
}

// NewStore returns a Store exposing only the keys of parent that begin with
// prefix, with the prefix hidden from the caller's point of view.
func NewStore(parent types.KVStore, prefix []byte) Store {
	return Store{
		parent: parent,
		prefix: prefix,
	}
}
// cloneAppend returns a freshly allocated slice holding bz followed by tail,
// sharing no backing storage with either input.
func cloneAppend(bz, tail []byte) []byte {
	out := make([]byte, 0, len(bz)+len(tail))
	out = append(out, bz...)
	return append(out, tail...)
}
// key maps a caller-supplied key into the parent store's key space by
// prepending the store's prefix. A nil key is rejected with a panic.
func (s Store) key(key []byte) []byte {
	if key == nil {
		panic("nil key on Store")
	}
	return cloneAppend(s.prefix, key)
}
// GetStoreType implements Store; the prefix store reports its parent's type.
func (s Store) GetStoreType() types.StoreType {
	return s.parent.GetStoreType()
}

// CacheWrap implements CacheWrap by branching into a cachekv store.
func (s Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace implements the KVStore interface, branching through a
// tracing store first.
func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
// Get implements KVStore by reading through to the parent under the
// prefixed key.
func (s Store) Get(key []byte) []byte {
	return s.parent.Get(s.key(key))
}
// Has implements KVStore, checking the prefixed key on the parent.
func (s Store) Has(key []byte) bool {
	return s.parent.Has(s.key(key))
}

// Set implements KVStore, writing value under the prefixed key.
func (s Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	types.AssertValidValue(value)
	s.parent.Set(s.key(key), value)
}

// Delete implements KVStore, deleting the prefixed key on the parent.
func (s Store) Delete(key []byte) {
	s.parent.Delete(s.key(key))
}
// prefixedRange translates a caller-visible (start, end) domain into the
// parent store's key space: start is always prefixed; a nil end maps to the
// first key past the whole prefix window (via cpIncr), otherwise end is
// prefixed as well. Shared by Iterator and ReverseIterator, which previously
// duplicated this logic.
func (s Store) prefixedRange(start, end []byte) (newstart, newend []byte) {
	newstart = cloneAppend(s.prefix, start)
	if end == nil {
		newend = cpIncr(s.prefix)
	} else {
		newend = cloneAppend(s.prefix, end)
	}
	return newstart, newend
}

// Iterator implements KVStore
// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106
func (s Store) Iterator(start, end []byte) types.Iterator {
	newstart, newend := s.prefixedRange(start, end)
	iter := s.parent.Iterator(newstart, newend)
	return newPrefixIterator(s.prefix, start, end, iter)
}

// ReverseIterator implements KVStore
// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129
func (s Store) ReverseIterator(start, end []byte) types.Iterator {
	newstart, newend := s.prefixedRange(start, end)
	iter := s.parent.ReverseIterator(newstart, newend)
	return newPrefixIterator(s.prefix, start, end, iter)
}
var _ types.Iterator = (*prefixIterator)(nil)

// prefixIterator wraps a parent iterator positioned in prefixed-key space and
// presents keys with the prefix stripped. It turns invalid as soon as the
// parent leaves the prefix window.
type prefixIterator struct {
	prefix []byte
	start  []byte
	end    []byte
	iter   types.Iterator
	valid  bool
}

// newPrefixIterator wraps parent; the iterator starts valid only if parent is
// already positioned on a key carrying the prefix.
func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator {
	return &prefixIterator{
		prefix: prefix,
		start:  start,
		end:    end,
		iter:   parent,
		valid:  parent.Valid() && bytes.HasPrefix(parent.Key(), prefix),
	}
}
// Domain implements Iterator, returning the caller-visible (unprefixed) domain.
func (pi *prefixIterator) Domain() ([]byte, []byte) {
	return pi.start, pi.end
}

// Valid implements Iterator; both this wrapper and the parent must be valid.
func (pi *prefixIterator) Valid() bool {
	return pi.valid && pi.iter.Valid()
}
// Next implements Iterator, advancing the parent one step. The wrapper goes
// permanently invalid once the parent is exhausted or leaves the prefix
// window; calling Next on an invalid iterator panics.
func (pi *prefixIterator) Next() {
	if !pi.valid {
		panic("prefixIterator invalid, cannot call Next()")
	}

	pi.iter.Next()
	if !pi.iter.Valid() || !bytes.HasPrefix(pi.iter.Key(), pi.prefix) {
		// parent stepped outside the prefix window; mark exhausted
		pi.valid = false
	}
}
// Key implements Iterator, returning the current key with the prefix
// stripped. Panics if the iterator is invalid.
func (pi *prefixIterator) Key() (key []byte) {
	if !pi.valid {
		panic("prefixIterator invalid, cannot call Key()")
	}

	key = pi.iter.Key()
	key = stripPrefix(key, pi.prefix)

	return
}

// Value implements Iterator, returning the parent's current value. Panics if
// the iterator is invalid.
func (pi *prefixIterator) Value() []byte {
	if !pi.valid {
		panic("prefixIterator invalid, cannot call Value()")
	}

	return pi.iter.Value()
}

// Close implements Iterator, closing the parent iterator.
func (pi *prefixIterator) Close() error {
	return pi.iter.Close()
}

// Error returns an error if the prefixIterator is invalid defined by the Valid
// method.
//
// NOTE(review): a normally exhausted iterator is also reported as an error
// here — confirm callers only consult Error while the iterator is expected
// to still be valid.
func (pi *prefixIterator) Error() error {
	if !pi.Valid() {
		return errors.New("invalid prefixIterator")
	}

	return nil
}
// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go
// stripPrefix returns key with prefix removed; it panics when key does not
// actually begin with prefix (an invariant the callers guarantee).
func stripPrefix(key, prefix []byte) []byte {
	if !bytes.HasPrefix(key, prefix) {
		panic("should not happen")
	}
	return key[len(prefix):]
}
// cpIncr wraps types.PrefixEndBytes, producing the exclusive upper bound of
// the key range that shares prefix bz; see types.PrefixEndBytes for the
// overflow (all-0xFF) behavior.
func cpIncr(bz []byte) []byte {
	return types.PrefixEndBytes(bz)
}

View File

@ -1,450 +0,0 @@
package prefix
import (
"crypto/rand"
"testing"
dbm "github.com/cosmos/cosmos-db"
tiavl "github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/gaskv"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/types"
)
// copied from iavl/store_test.go
var (
	cacheSize = 100
)

// bz is shorthand for converting a string literal to bytes.
func bz(s string) []byte { return []byte(s) }

// kvpair is a single random key/value fixture.
type kvpair struct {
	key   []byte
	value []byte
}

// genRandomKVPairs produces 20 pairs of random 32-byte keys and values.
func genRandomKVPairs(t *testing.T) []kvpair {
	t.Helper()

	kvps := make([]kvpair, 20)
	for i := 0; i < 20; i++ {
		kvps[i].key = make([]byte, 32)
		_, err := rand.Read(kvps[i].key)
		require.NoError(t, err)
		kvps[i].value = make([]byte, 32)
		_, err = rand.Read(kvps[i].value)
		require.NoError(t, err)
	}

	return kvps
}

// setRandomKVPairs writes 20 random pairs into store and returns them.
func setRandomKVPairs(t *testing.T, store types.KVStore) []kvpair {
	t.Helper()

	kvps := genRandomKVPairs(t)
	for _, kvp := range kvps {
		store.Set(kvp.key, kvp.value)
	}

	return kvps
}
// testPrefixStore layers a prefix store on baseStore and a second prefix
// store ("prefix") on top of that, then checks that every write through the
// inner store is visible — under progressively longer keys — through the
// outer store and the base store, and that deletes propagate the same way.
func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) {
	t.Helper()

	prefixStore := NewStore(baseStore, prefix)
	prefixPrefixStore := NewStore(prefixStore, []byte("prefix"))

	// nil keys are rejected
	require.Panics(t, func() { prefixStore.Get(nil) })
	require.Panics(t, func() { prefixStore.Set(nil, []byte{}) })

	kvps := setRandomKVPairs(t, prefixPrefixStore)

	for i := 0; i < 20; i++ {
		key := kvps[i].key
		value := kvps[i].value

		// visible at each layer under the appropriately prefixed key
		require.True(t, prefixPrefixStore.Has(key))
		require.Equal(t, value, prefixPrefixStore.Get(key))

		key = append([]byte("prefix"), key...)
		require.True(t, prefixStore.Has(key))
		require.Equal(t, value, prefixStore.Get(key))
		key = append(prefix, key...)
		require.True(t, baseStore.Has(key))
		require.Equal(t, value, baseStore.Get(key))

		// deletes through the inner store propagate to all layers
		key = kvps[i].key
		prefixPrefixStore.Delete(key)
		require.False(t, prefixPrefixStore.Has(key))
		require.Nil(t, prefixPrefixStore.Get(key))
		key = append([]byte("prefix"), key...)
		require.False(t, prefixStore.Has(key))
		require.Nil(t, prefixStore.Get(key))
		key = append(prefix, key...)
		require.False(t, baseStore.Has(key))
		require.Nil(t, baseStore.Get(key))
	}
}
// TestIAVLStorePrefix runs the layered prefix-store checks against an
// IAVL-backed store.
func TestIAVLStorePrefix(t *testing.T) {
	db := dbm.NewMemDB()
	tree := tiavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
	iavlStore := iavl.UnsafeNewStore(tree)

	testPrefixStore(t, iavlStore, []byte("test"))
}

// TestPrefixKVStoreNoNilSet ensures nil values are rejected by Set.
func TestPrefixKVStoreNoNilSet(t *testing.T) {
	meter := types.NewGasMeter(100000000)
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	gasStore := gaskv.NewStore(mem, meter, types.KVGasConfig())
	require.Panics(t, func() { gasStore.Set([]byte("key"), nil) }, "setting a nil value should panic")
}

// TestPrefixStoreIterate checks that iterating the prefix store yields the
// same entries (sans prefix) as a prefix-filtered iteration of the base store.
func TestPrefixStoreIterate(t *testing.T) {
	db := dbm.NewMemDB()
	baseStore := dbadapter.Store{DB: db}
	prefix := []byte("test")
	prefixStore := NewStore(baseStore, prefix)

	setRandomKVPairs(t, prefixStore)

	bIter := types.KVStorePrefixIterator(baseStore, prefix)
	pIter := types.KVStorePrefixIterator(prefixStore, nil)

	for bIter.Valid() && pIter.Valid() {
		require.Equal(t, bIter.Key(), append(prefix, pIter.Key()...))
		require.Equal(t, bIter.Value(), pIter.Value())

		bIter.Next()
		pIter.Next()
	}

	bIter.Close()
	pIter.Close()
}
// incFirstByte mutates bz in place, bumping its first byte.
func incFirstByte(bz []byte) {
	bz[0]++
}

// TestCloneAppend verifies that cloneAppend's result shares no backing
// storage with its inputs: mutating any of the three leaves the others
// unaffected.
func TestCloneAppend(t *testing.T) {
	kvps := genRandomKVPairs(t)
	for _, kvp := range kvps {
		bz := cloneAppend(kvp.key, kvp.value)
		require.Equal(t, bz, append(kvp.key, kvp.value...))

		// mutating the result must not affect the inputs
		incFirstByte(bz)
		require.NotEqual(t, bz, append(kvp.key, kvp.value...))

		// mutating either input must not affect a fresh result
		bz = cloneAppend(kvp.key, kvp.value)
		incFirstByte(kvp.key)
		require.NotEqual(t, bz, append(kvp.key, kvp.value...))

		bz = cloneAppend(kvp.key, kvp.value)
		incFirstByte(kvp.value)
		require.NotEqual(t, bz, append(kvp.key, kvp.value...))
	}
}
// TestPrefixStoreIteratorEdgeCase covers a prefix whose trailing bytes are
// 0xFF, where computing the end of the iteration window (cpIncr) must carry
// the increment into a shorter key rather than overflowing.
func TestPrefixStoreIteratorEdgeCase(t *testing.T) {
	db := dbm.NewMemDB()
	baseStore := dbadapter.Store{DB: db}

	// overflow in cpIncr
	prefix := []byte{0xAA, 0xFF, 0xFF}
	prefixStore := NewStore(baseStore, prefix)

	// ascending order
	baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB}, []byte{})
	baseStore.Set([]byte{0xAB, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{})

	iter := prefixStore.Iterator(nil, nil)

	checkDomain(t, iter, nil, nil)
	// only the two keys carrying the full prefix are visible: "" and {0x00}
	checkItem(t, iter, []byte{}, bz(""))
	checkNext(t, iter, true)
	checkItem(t, iter, []byte{0x00}, bz(""))
	checkNext(t, iter, false)

	checkInvalid(t, iter)

	iter.Close()
}
// TestPrefixStoreReverseIteratorEdgeCase mirrors the forward edge-case test
// for reverse iteration, covering both an all-0xFF prefix tail (cpIncr
// overflow) and an all-0x00 prefix tail.
func TestPrefixStoreReverseIteratorEdgeCase(t *testing.T) {
	db := dbm.NewMemDB()
	baseStore := dbadapter.Store{DB: db}

	// overflow in cpIncr
	prefix := []byte{0xAA, 0xFF, 0xFF}
	prefixStore := NewStore(baseStore, prefix)

	// descending order
	baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{})
	baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{})

	iter := prefixStore.ReverseIterator(nil, nil)

	checkDomain(t, iter, nil, nil)
	// reverse order: {0x00} first, then the bare-prefix key ""
	checkItem(t, iter, []byte{0x00}, bz(""))
	checkNext(t, iter, true)
	checkItem(t, iter, []byte{}, bz(""))
	checkNext(t, iter, false)

	checkInvalid(t, iter)

	iter.Close()

	db = dbm.NewMemDB()
	baseStore = dbadapter.Store{DB: db}

	// underflow in cpDecr
	prefix = []byte{0xAA, 0x00, 0x00}
	prefixStore = NewStore(baseStore, prefix)

	baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00}, []byte{})
	baseStore.Set([]byte{0xAB, 0x00, 0x01}, []byte{})
	baseStore.Set([]byte{0xAA, 0x00, 0x00, 0x00}, []byte{})
	baseStore.Set([]byte{0xAA, 0x00, 0x00}, []byte{})
	baseStore.Set([]byte{0xA9, 0xFF, 0xFF, 0x00}, []byte{})
	baseStore.Set([]byte{0xA9, 0xFF, 0xFF}, []byte{})

	iter = prefixStore.ReverseIterator(nil, nil)

	checkDomain(t, iter, nil, nil)
	checkItem(t, iter, []byte{0x00}, bz(""))
	checkNext(t, iter, true)
	checkItem(t, iter, []byte{}, bz(""))
	checkNext(t, iter, false)

	checkInvalid(t, iter)

	iter.Close()
}
// Tests below are ported from https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db_test.go

// mockStoreWithStuff returns a KVStore pre-populated with a fixed set of
// key/value pairs: several entries under the "key" prefix plus neighboring
// keys ("k", "ke", "kee", "something") that must not leak through the prefix.
func mockStoreWithStuff() types.KVStore {
	store := dbadapter.Store{DB: dbm.NewMemDB()}

	fixtures := [][2]string{
		// Under "key" prefix
		{"key", "value"},
		{"key1", "value1"},
		{"key2", "value2"},
		{"key3", "value3"},
		{"something", "else"},
		{"k", "val"},
		{"ke", "valu"},
		{"kee", "valuu"},
	}
	for _, kv := range fixtures {
		store.Set(bz(kv[0]), bz(kv[1]))
	}

	return store
}
// checkValue asserts that store.Get(key) returns exactly the expected value.
func checkValue(t *testing.T, store types.KVStore, key, expected []byte) {
	t.Helper()
	require.Equal(t, expected, store.Get(key))
}
// checkValid asserts the iterator's current validity matches expected.
func checkValid(t *testing.T, itr types.Iterator, expected bool) {
	t.Helper()
	require.Equal(t, expected, itr.Valid())
}
// checkNext advances the iterator once and asserts its resulting validity.
func checkNext(t *testing.T, itr types.Iterator, expected bool) {
	t.Helper()
	itr.Next()
	require.Equal(t, expected, itr.Valid())
}
// checkDomain asserts that the iterator reports the expected (start, end) domain.
func checkDomain(t *testing.T, itr types.Iterator, start, end []byte) {
	t.Helper()
	gotStart, gotEnd := itr.Domain()
	require.Equal(t, start, gotStart)
	require.Equal(t, end, gotEnd)
}
// checkItem asserts that the iterator's current key and value are exactly
// (type and content) the expected ones.
func checkItem(t *testing.T, itr types.Iterator, key, value []byte) {
	t.Helper()
	require.Exactly(t, key, itr.Key())
	require.Exactly(t, value, itr.Value())
}
// checkInvalid asserts the iterator is exhausted: it reports invalid and
// panics on Key, Value, and Next.
func checkInvalid(t *testing.T, itr types.Iterator) {
	t.Helper()
	checkValid(t, itr, false)
	checkKeyPanics(t, itr)
	checkValuePanics(t, itr)
	checkNextPanics(t, itr)
}
// checkKeyPanics asserts that calling Key on the iterator panics.
func checkKeyPanics(t *testing.T, itr types.Iterator) {
	t.Helper()
	require.Panics(t, func() { itr.Key() })
}
// checkValuePanics asserts that calling Value on the iterator panics.
func checkValuePanics(t *testing.T, itr types.Iterator) {
	t.Helper()
	require.Panics(t, func() { itr.Value() })
}
// checkNextPanics asserts that calling Next on the iterator panics.
func checkNextPanics(t *testing.T, itr types.Iterator) {
	t.Helper()
	require.Panics(t, func() { itr.Next() })
}
// TestPrefixDBSimple verifies Get through a prefix store: keys are addressed
// relative to the prefix, and full (unprefixed) keys or neighbors resolve to nil.
func TestPrefixDBSimple(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	checkValue(t, pstore, bz("key"), nil)
	checkValue(t, pstore, bz(""), bz("value"))
	checkValue(t, pstore, bz("key1"), nil)
	checkValue(t, pstore, bz("1"), bz("value1"))
	checkValue(t, pstore, bz("key2"), nil)
	checkValue(t, pstore, bz("2"), bz("value2"))
	checkValue(t, pstore, bz("key3"), nil)
	checkValue(t, pstore, bz("3"), bz("value3"))
	checkValue(t, pstore, bz("something"), nil)
	checkValue(t, pstore, bz("k"), nil)
	checkValue(t, pstore, bz("ke"), nil)
	checkValue(t, pstore, bz("kee"), nil)
}
// TestPrefixDBIterator1 iterates the full (nil, nil) domain of the prefix
// store and expects exactly the four prefixed entries, in ascending order,
// with the prefix stripped from the keys.
func TestPrefixDBIterator1(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.Iterator(nil, nil)
	checkDomain(t, itr, nil, nil)
	checkItem(t, itr, bz(""), bz("value"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("1"), bz("value1"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("2"), bz("value2"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("3"), bz("value3"))
	checkNext(t, itr, false)
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBIterator2 uses an empty (exclusive) end key, which yields an
// empty range: the iterator is immediately invalid.
func TestPrefixDBIterator2(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.Iterator(nil, bz(""))
	checkDomain(t, itr, nil, bz(""))
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBIterator3 starts from the empty key (inclusive) with an open
// end, which covers the whole prefixed range like (nil, nil).
func TestPrefixDBIterator3(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.Iterator(bz(""), nil)
	checkDomain(t, itr, bz(""), nil)
	checkItem(t, itr, bz(""), bz("value"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("1"), bz("value1"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("2"), bz("value2"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("3"), bz("value3"))
	checkNext(t, itr, false)
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBIterator4 uses identical start and end keys; the range is empty
// and the iterator is immediately invalid.
func TestPrefixDBIterator4(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.Iterator(bz(""), bz(""))
	checkDomain(t, itr, bz(""), bz(""))
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBReverseIterator1 reverse-iterates the full (nil, nil) domain
// and expects the four prefixed entries in descending order.
func TestPrefixDBReverseIterator1(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.ReverseIterator(nil, nil)
	checkDomain(t, itr, nil, nil)
	checkItem(t, itr, bz("3"), bz("value3"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("2"), bz("value2"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("1"), bz("value1"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz(""), bz("value"))
	checkNext(t, itr, false)
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBReverseIterator2 reverse-iterates from the empty key (inclusive
// start) with an open end; this covers the whole prefixed range, descending.
func TestPrefixDBReverseIterator2(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.ReverseIterator(bz(""), nil)
	checkDomain(t, itr, bz(""), nil)
	checkItem(t, itr, bz("3"), bz("value3"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("2"), bz("value2"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz("1"), bz("value1"))
	checkNext(t, itr, true)
	checkItem(t, itr, bz(""), bz("value"))
	checkNext(t, itr, false)
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBReverseIterator3 uses an empty (exclusive) end key; the range
// is empty and the iterator is immediately invalid.
func TestPrefixDBReverseIterator3(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.ReverseIterator(nil, bz(""))
	checkDomain(t, itr, nil, bz(""))
	checkInvalid(t, itr)
	itr.Close()
}
// TestPrefixDBReverseIterator4 uses identical start and end keys; the range is
// empty and the iterator is immediately invalid.
func TestPrefixDBReverseIterator4(t *testing.T) {
	store := mockStoreWithStuff()
	pstore := NewStore(store, bz("key"))

	itr := pstore.ReverseIterator(bz(""), bz(""))
	// Consistency with TestPrefixDBIterator4/ReverseIterator2-3: the reported
	// domain must echo the requested bounds even when the range is empty.
	checkDomain(t, itr, bz(""), bz(""))
	checkInvalid(t, itr)
	itr.Close()
}
// TestCacheWraps verifies that both CacheWrap and CacheWrapWithTrace on a
// dbadapter store return a *cachekv.Store.
func TestCacheWraps(t *testing.T) {
	db := dbm.NewMemDB()
	store := dbadapter.Store{DB: db}

	cacheWrapper := store.CacheWrap()
	require.IsType(t, &cachekv.Store{}, cacheWrapper)

	cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
	require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}

View File

@ -1,30 +0,0 @@
# Pruning
## Overview
Pruning is the mechanism for deleting old application heights from the disk. Depending on the use case,
nodes may require different pruning strategies. For example, archive nodes must keep all
the states and prune nothing. On the other hand, a regular validator node may want to only keep 100 latest heights for performance reasons.
## Strategies
The strategies are configured in `app.toml`, with the format `pruning = "<strategy>"` where the options are:
* `default`: only the last 362,880 states (approximately 3.5 weeks' worth of state) are kept; pruning happens at 10-block intervals
* `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node)
* `everything`: 2 latest states will be kept; pruning at 10 block intervals.
* `custom`: allows pruning options to be manually specified through `pruning-keep-recent` and `pruning-interval`
If no strategy is given to the BaseApp, `nothing` is selected. However, we perform validation on the CLI layer to require these to be always set in the config file.
## Custom Pruning
These are applied if and only if the pruning strategy is custom:
* `pruning-keep-recent`: N means to keep all of the last N states
* `pruning-interval`: N means to delete old states from disk every Nth block.
## Relationship to State Sync Snapshots
Snapshot settings are optional. However, if set, they have an effect on how pruning is done by
persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details.

View File

@ -1,8 +0,0 @@
package pruning
// Test-only exports: re-export internal identifiers of the pruning package so
// the external pruning_test package can exercise them.
var (
	PruneSnapshotHeightsKey    = pruneSnapshotHeightsKey
	Int64SliceToBytes          = int64SliceToBytes
	LoadPruningSnapshotHeights = loadPruningSnapshotHeights
)

View File

@ -1,191 +0,0 @@
package pruning
import (
"encoding/binary"
"fmt"
"sort"
"sync"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/log"
"cosmossdk.io/store/pruning/types"
)
// Manager is an abstraction to handle the logic needed for
// determining when to prune old heights of the store
// based on the strategy described by the pruning options.
type Manager struct {
	db     dbm.DB
	logger log.Logger
	opts   types.PruningOptions
	// snapshotInterval is the interval at which state-sync snapshots are taken;
	// 0 means snapshots are not considered when computing the pruning height.
	snapshotInterval uint64
	// Snapshots are taken in a separate goroutine from the regular execution
	// and can be delivered asynchronously via HandleSnapshotHeight.
	// Therefore, we sync access to pruneSnapshotHeights with this mutex.
	pruneSnapshotHeightsMx sync.RWMutex
	// These are the heights that are multiples of snapshotInterval and kept for state sync snapshots.
	// The heights are added to be pruned when a snapshot is complete.
	pruneSnapshotHeights []int64
}
// NegativeHeightsError is returned when a negative height is provided to the manager.
type NegativeHeightsError struct {
	Height int64 // the offending (negative) height
}
// Compile-time assertion that *NegativeHeightsError implements error.
var _ error = &NegativeHeightsError{}

// Error implements the error interface.
func (e *NegativeHeightsError) Error() string {
	return fmt.Sprintf("failed to get pruned heights: %d", e.Height)
}
// pruneSnapshotHeightsKey is the DB key under which the pending
// snapshot-prune heights are persisted (see HandleSnapshotHeight).
var pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights")
// NewManager returns a new Manager with the given db and logger.
// The returned manager uses a pruning strategy of "nothing" which
// keeps all heights. Users of the Manager may change the strategy
// by calling SetOptions.
func NewManager(db dbm.DB, logger log.Logger) *Manager {
	return &Manager{
		db:     db,
		logger: logger,
		opts:   types.NewPruningOptions(types.PruningNothing),
		// seed with 0 so the slice is non-empty; GetPruningHeight returns 0
		// when the slice is empty.
		pruneSnapshotHeights: []int64{0},
	}
}
// SetOptions sets the pruning strategy on the manager.
// Note: it does not synchronize with concurrent users of the manager.
func (m *Manager) SetOptions(opts types.PruningOptions) {
	m.opts = opts
}
// GetOptions fetches the pruning strategy from the manager.
func (m *Manager) GetOptions() types.PruningOptions {
	return m.opts
}
// HandleSnapshotHeight persists the snapshot height to be pruned at the next appropriate
// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails.
// The input height must be greater than 0, and the pruning strategy must not be set to pruning nothing.
// If either of these conditions is not met, this function does nothing.
func (m *Manager) HandleSnapshotHeight(height int64) {
	if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 {
		return
	}

	m.pruneSnapshotHeightsMx.Lock()
	defer m.pruneSnapshotHeightsMx.Unlock()

	m.logger.Debug("HandleSnapshotHeight", "height", height)

	m.pruneSnapshotHeights = append(m.pruneSnapshotHeights, height)
	sort.Slice(m.pruneSnapshotHeights, func(i, j int) bool { return m.pruneSnapshotHeights[i] < m.pruneSnapshotHeights[j] })

	// Trim the leading run of heights that are exactly snapshotInterval apart,
	// keeping only the last height of that run; earlier heights in a contiguous
	// chain no longer need to hold back pruning (GetPruningHeight only looks at
	// the first element).
	k := 1
	for ; k < len(m.pruneSnapshotHeights); k++ {
		if m.pruneSnapshotHeights[k] != m.pruneSnapshotHeights[k-1]+int64(m.snapshotInterval) {
			break
		}
	}
	m.pruneSnapshotHeights = m.pruneSnapshotHeights[k-1:]

	// flush the updates to disk so that they are not lost if crash happens.
	if err := m.db.SetSync(pruneSnapshotHeightsKey, int64SliceToBytes(m.pruneSnapshotHeights)); err != nil {
		panic(err)
	}
}
// SetSnapshotInterval sets the interval at which the snapshots are taken.
// A value of 0 disables snapshot awareness in GetPruningHeight.
func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) {
	m.snapshotInterval = snapshotInterval
}
// GetPruningHeight returns the highest height that may be pruned when asked at
// the given height, or 0 when nothing can be pruned. Pruning proceeds only
// when the strategy is not PruningNothing, the pruning interval divides
// height, and height is beyond the keep-recent window. The result is further
// capped so heights still needed by an in-flight state-sync snapshot are kept.
func (m *Manager) GetPruningHeight(height int64) int64 {
	if m.opts.GetPruningStrategy() == types.PruningNothing {
		return 0
	}
	// Interval is unsigned; compare against 0 explicitly (a "<= 0" check on a
	// uint64 is misleading since negative values are impossible).
	if m.opts.Interval == 0 {
		return 0
	}
	if height%int64(m.opts.Interval) != 0 || height <= int64(m.opts.KeepRecent) {
		return 0
	}

	// Consider the snapshot height
	pruneHeight := height - 1 - int64(m.opts.KeepRecent) // we should keep the current height at least

	m.pruneSnapshotHeightsMx.RLock()
	defer m.pruneSnapshotHeightsMx.RUnlock()

	// snapshotInterval is zero, indicating that all heights can be pruned
	if m.snapshotInterval == 0 {
		return pruneHeight
	}

	if len(m.pruneSnapshotHeights) == 0 { // the length should be greater than zero
		return 0
	}

	// the snapshot `m.pruneSnapshotHeights[0]` is already operated,
	// so we can prune upto `m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1`
	snHeight := m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1
	if snHeight < pruneHeight {
		return snHeight
	}
	return pruneHeight
}
// LoadSnapshotHeights loads the snapshot heights from the database as a crash recovery.
// It is a no-op when the strategy is PruningNothing, and in-memory heights are
// only replaced when the database actually contains persisted heights.
func (m *Manager) LoadSnapshotHeights(db dbm.DB) error {
	if m.opts.GetPruningStrategy() == types.PruningNothing {
		return nil
	}

	loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(db)
	if err != nil {
		return err
	}

	if len(loadedPruneSnapshotHeights) > 0 {
		m.pruneSnapshotHeightsMx.Lock()
		defer m.pruneSnapshotHeightsMx.Unlock()
		m.pruneSnapshotHeights = loadedPruneSnapshotHeights
	}

	return nil
}
// loadPruningSnapshotHeights reads the persisted snapshot-prune heights from
// db. It returns an empty slice when nothing has been persisted, and an error
// when the stored value is corrupt (wrong length or a negative height).
func loadPruningSnapshotHeights(db dbm.DB) ([]int64, error) {
	bz, err := db.Get(pruneSnapshotHeightsKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err)
	}
	if len(bz) == 0 {
		return []int64{}, nil
	}

	// Each height is stored as a big-endian uint64 (see int64SliceToBytes);
	// a trailing partial chunk would previously cause an out-of-range panic.
	if len(bz)%8 != 0 {
		return nil, fmt.Errorf("malformed pruned heights value: length %d is not a multiple of 8", len(bz))
	}

	pruneSnapshotHeights := make([]int64, len(bz)/8)
	for i := range pruneSnapshotHeights {
		h := int64(binary.BigEndian.Uint64(bz[i*8 : i*8+8]))
		if h < 0 {
			return nil, &NegativeHeightsError{Height: h}
		}
		pruneSnapshotHeights[i] = h
	}

	return pruneSnapshotHeights, nil
}
func int64SliceToBytes(slice []int64) []byte {
bz := make([]byte, 0, len(slice)*8)
for _, ph := range slice {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(ph))
bz = append(bz, buf...)
}
return bz
}

View File

@ -1,303 +0,0 @@
package pruning_test
import (
"errors"
"fmt"
"testing"
db "github.com/cosmos/cosmos-db"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/mock"
"cosmossdk.io/store/pruning"
"cosmossdk.io/store/pruning/types"
)
// dbErr is the sentinel message used by mocked DB failures in these tests.
const dbErr = "db error"
// TestNewManager verifies a fresh manager defaults to the PruningNothing strategy.
func TestNewManager(t *testing.T) {
	manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())

	require.NotNil(t, manager)
	require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy())
}
// TestStrategies runs every pruning strategy (with and without snapshots
// enabled) over a range of heights and checks GetPruningHeight against an
// independently computed expectation.
func TestStrategies(t *testing.T) {
	testcases := map[string]struct {
		strategy         types.PruningOptions
		snapshotInterval uint64
		strategyToAssert types.PruningStrategy
		isValid          bool
	}{
		"prune nothing - no snapshot": {
			strategy:         types.NewPruningOptions(types.PruningNothing),
			strategyToAssert: types.PruningNothing,
		},
		"prune nothing - snapshot": {
			strategy:         types.NewPruningOptions(types.PruningNothing),
			strategyToAssert: types.PruningNothing,
			snapshotInterval: 100,
		},
		"prune default - no snapshot": {
			strategy:         types.NewPruningOptions(types.PruningDefault),
			strategyToAssert: types.PruningDefault,
		},
		"prune default - snapshot": {
			strategy:         types.NewPruningOptions(types.PruningDefault),
			strategyToAssert: types.PruningDefault,
			snapshotInterval: 100,
		},
		"prune everything - no snapshot": {
			strategy:         types.NewPruningOptions(types.PruningEverything),
			strategyToAssert: types.PruningEverything,
		},
		"prune everything - snapshot": {
			strategy:         types.NewPruningOptions(types.PruningEverything),
			strategyToAssert: types.PruningEverything,
			snapshotInterval: 100,
		},
		"custom 100-10-15": {
			strategy:         types.NewCustomPruningOptions(100, 15),
			snapshotInterval: 10,
			strategyToAssert: types.PruningCustom,
		},
		"custom 10-10-15": {
			strategy:         types.NewCustomPruningOptions(10, 15),
			snapshotInterval: 10,
			strategyToAssert: types.PruningCustom,
		},
		"custom 100-0-15": {
			strategy:         types.NewCustomPruningOptions(100, 15),
			snapshotInterval: 0,
			strategyToAssert: types.PruningCustom,
		},
	}

	for name, tc := range testcases {
		tc := tc // capture the range variable for the parallel subtest (pre-Go 1.22 loop semantics)
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
			require.NotNil(t, manager)

			curStrategy := tc.strategy
			manager.SetSnapshotInterval(tc.snapshotInterval)

			pruneStrategy := curStrategy.GetPruningStrategy()
			require.Equal(t, tc.strategyToAssert, pruneStrategy)

			// Validate strategy parameters
			switch pruneStrategy {
			case types.PruningDefault:
				require.Equal(t, uint64(362880), curStrategy.KeepRecent)
				require.Equal(t, uint64(10), curStrategy.Interval)
			case types.PruningNothing:
				require.Equal(t, uint64(0), curStrategy.KeepRecent)
				require.Equal(t, uint64(0), curStrategy.Interval)
			case types.PruningEverything:
				require.Equal(t, uint64(2), curStrategy.KeepRecent)
				require.Equal(t, uint64(10), curStrategy.Interval)
			default:
				// PruningCustom: parameters are user-specified; nothing to assert.
			}

			manager.SetOptions(curStrategy)
			require.Equal(t, tc.strategy, manager.GetOptions())

			curKeepRecent := curStrategy.KeepRecent
			// Note: when snapshotInterval == 0 the uint64 subtraction wraps and
			// converts to -1; snHeight is only consulted below when
			// tc.snapshotInterval > 0, so this is harmless.
			snHeight := int64(tc.snapshotInterval - 1)
			for curHeight := int64(0); curHeight < 110000; curHeight++ {
				if tc.snapshotInterval != 0 {
					if curHeight > int64(tc.snapshotInterval) && curHeight%int64(tc.snapshotInterval) == int64(tc.snapshotInterval)-1 {
						// Simulate the completion of the snapshot for the interval that just elapsed.
						manager.HandleSnapshotHeight(curHeight - int64(tc.snapshotInterval) + 1)
						snHeight = curHeight
					}
				}

				pruningHeightActual := manager.GetPruningHeight(curHeight)
				curHeightStr := fmt.Sprintf("height: %d", curHeight)

				switch curStrategy.GetPruningStrategy() {
				case types.PruningNothing:
					require.Equal(t, int64(0), pruningHeightActual, curHeightStr)
				default:
					if curHeight > int64(curKeepRecent) && curHeight%int64(curStrategy.Interval) == 0 {
						pruningHeightExpected := curHeight - int64(curKeepRecent) - 1
						if tc.snapshotInterval > 0 && snHeight < pruningHeightExpected {
							pruningHeightExpected = snHeight
						}
						require.Equal(t, pruningHeightExpected, pruningHeightActual, curHeightStr)
					} else {
						require.Equal(t, int64(0), pruningHeightActual, curHeightStr)
					}
				}
			}
		})
	}
}
// TestPruningHeight_Inputs checks GetPruningHeight edge cases: non-positive
// heights, heights inside the keep-recent window, non-interval heights, and
// the PruningNothing strategy.
func TestPruningHeight_Inputs(t *testing.T) {
	keepRecent := int64(types.NewPruningOptions(types.PruningEverything).KeepRecent)
	interval := int64(types.NewPruningOptions(types.PruningEverything).Interval)

	testcases := map[string]struct {
		height         int64
		expectedResult int64
		strategy       types.PruningStrategy
	}{
		"currentHeight is negative - prune everything - invalid currentHeight": {
			-1,
			0,
			types.PruningEverything,
		},
		"currentHeight is zero - prune everything - invalid currentHeight": {
			0,
			0,
			types.PruningEverything,
		},
		"currentHeight is positive but within keep recent- prune everything - not kept": {
			keepRecent,
			0,
			types.PruningEverything,
		},
		"currentHeight is positive and equal to keep recent+1 - no kept": {
			keepRecent + 1,
			0,
			types.PruningEverything,
		},
		"currentHeight is positive and greater than keep recent+1 but not multiple of interval - no kept": {
			keepRecent + 2,
			0,
			types.PruningEverything,
		},
		"currentHeight is positive and greater than keep recent+1 and multiple of interval - kept": {
			interval,
			interval - keepRecent - 1,
			types.PruningEverything,
		},
		"pruning nothing, currentHeight is positive and greater than keep recent - not kept": {
			interval,
			0,
			types.PruningNothing,
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
			require.NotNil(t, manager)
			manager.SetOptions(types.NewPruningOptions(tc.strategy))

			pruningHeightActual := manager.GetPruningHeight(tc.height)
			require.Equal(t, tc.expectedResult, pruningHeightActual)
		})
	}
}
// TestHandleSnapshotHeight_DbErr_Panic verifies that HandleSnapshotHeight
// panics when flushing the updated heights to disk fails.
func TestHandleSnapshotHeight_DbErr_Panic(t *testing.T) {
	ctrl := gomock.NewController(t)

	// Setup
	dbMock := mock.NewMockDB(ctrl)

	dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1)

	manager := pruning.NewManager(dbMock, log.NewNopLogger())
	manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
	require.NotNil(t, manager)

	defer func() {
		// The test fails unless a panic was recovered here.
		if r := recover(); r == nil {
			t.Fail()
		}
	}()

	manager.HandleSnapshotHeight(10)
}
// TestHandleSnapshotHeight_LoadFromDisk verifies that heights flushed by
// HandleSnapshotHeight can be read back from disk, and that
// LoadSnapshotHeights round-trips the persisted value.
func TestHandleSnapshotHeight_LoadFromDisk(t *testing.T) {
	snapshotInterval := uint64(10)

	// Setup
	db := db.NewMemDB()
	manager := pruning.NewManager(db, log.NewNopLogger())
	require.NotNil(t, manager)

	manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
	manager.SetSnapshotInterval(snapshotInterval)

	expected := 0
	for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ {
		// fixed typo ("snaphost") so assertion failures read correctly
		snapshotHeightStr := fmt.Sprintf("snapshot height: %d", snapshotHeight)
		if snapshotHeight > int64(snapshotInterval) && snapshotHeight%int64(snapshotInterval) == 1 {
			// Test flush
			manager.HandleSnapshotHeight(snapshotHeight - 1)
			expected = 1
		}

		loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db)
		require.NoError(t, err)
		require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)

		// Test load back
		err = manager.LoadSnapshotHeights(db)
		require.NoError(t, err)

		loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db)
		require.NoError(t, err)
		require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)
	}
}
// TestLoadPruningSnapshotHeights checks that LoadSnapshotHeights surfaces a
// NegativeHeightsError for persisted negative heights and succeeds otherwise.
func TestLoadPruningSnapshotHeights(t *testing.T) {
	var (
		manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
		err     error
	)
	require.NotNil(t, manager)

	// must not be PruningNothing
	manager.SetOptions(types.NewPruningOptions(types.PruningDefault))

	testcases := map[string]struct {
		getFlushedPruningSnapshotHeights func() []int64
		expectedResult                   error
	}{
		"negative snapshotPruningHeight - error": {
			getFlushedPruningSnapshotHeights: func() []int64 {
				return []int64{5, -2, 3}
			},
			expectedResult: &pruning.NegativeHeightsError{Height: -2},
		},
		"non-negative - success": {
			getFlushedPruningSnapshotHeights: func() []int64 {
				return []int64{5, 0, 3}
			},
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			db := db.NewMemDB()
			if tc.getFlushedPruningSnapshotHeights != nil {
				// Persist the raw encoded heights directly under the manager's key.
				err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.Int64SliceToBytes(tc.getFlushedPruningSnapshotHeights()))
				require.NoError(t, err)
			}

			err = manager.LoadSnapshotHeights(db)
			require.Equal(t, tc.expectedResult, err)
		})
	}
}
// TestLoadSnapshotHeights_PruneNothing verifies that LoadSnapshotHeights is a
// no-op (and returns no error) when the strategy is PruningNothing.
func TestLoadSnapshotHeights_PruneNothing(t *testing.T) {
	manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
	require.NotNil(t, manager)

	manager.SetOptions(types.NewPruningOptions(types.PruningNothing))

	// require.NoError (not require.Nil) is the idiomatic testify assertion
	// for error values and produces better failure output.
	require.NoError(t, manager.LoadSnapshotHeights(db.NewMemDB()))
}

View File

@ -1,130 +0,0 @@
package types
import (
"errors"
"fmt"
)
// PruningOptions defines the pruning strategy used when determining which
// heights are removed from disk when committing state.
type PruningOptions struct {
	// KeepRecent defines how many recent heights to keep on disk.
	KeepRecent uint64

	// Interval defines when the pruned heights are removed from disk.
	Interval uint64

	// Strategy defines the kind of pruning strategy. See below for more information on each.
	Strategy PruningStrategy
}
// PruningStrategy enumerates the supported built-in pruning strategies.
type PruningStrategy int
// Pruning option string constants, as accepted in app configuration
// (see NewPruningOptionsFromString).
const (
	PruningOptionDefault    = "default"
	PruningOptionEverything = "everything"
	PruningOptionNothing    = "nothing"
	PruningOptionCustom     = "custom"
)
const (
	// PruningDefault defines a pruning strategy where the last 362880 heights are
	// kept where to-be pruned heights are pruned at every 10th height.
	// The last 362880 heights are kept(approximately 3.5 weeks worth of state) assuming the typical
	// block time is 6s. If these values do not match the applications' requirements, use the "custom" option.
	PruningDefault PruningStrategy = iota
	// PruningEverything defines a pruning strategy where all committed heights are
	// deleted, storing only the current height and last 2 states. To-be pruned heights are
	// pruned at every 10th height.
	PruningEverything
	// PruningNothing defines a pruning strategy where all heights are kept on disk.
	// This is the only strategy where KeepEvery=1 is allowed with state-sync snapshots disabled.
	PruningNothing
	// PruningCustom defines a pruning strategy where the user specifies the pruning.
	PruningCustom
	// PruningUndefined defines an undefined pruning strategy. It is to be returned by stores that do not support pruning.
	PruningUndefined
)
// Parameters of the PruningEverything strategy; they also serve as the lower
// bounds enforced by Validate for custom options.
const (
	pruneEverythingKeepRecent = 2
	pruneEverythingInterval   = 10
)
// Sentinel validation errors returned by PruningOptions.Validate.
var (
	ErrPruningIntervalZero       = errors.New("'pruning-interval' must not be 0. If you want to disable pruning, select pruning = \"nothing\"")
	ErrPruningIntervalTooSmall   = fmt.Errorf("'pruning-interval' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingInterval)
	ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent)
)
// NewPruningOptions returns the canonical PruningOptions for the given
// built-in strategy. Any strategy other than the three built-ins yields
// custom options with zero KeepRecent/Interval, which fail Validate until
// configured.
func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions {
	switch pruningStrategy {
	case PruningDefault:
		return PruningOptions{
			KeepRecent: 362880,
			Interval:   10,
			Strategy:   PruningDefault,
		}
	case PruningEverything:
		return PruningOptions{
			KeepRecent: pruneEverythingKeepRecent,
			Interval:   pruneEverythingInterval,
			Strategy:   PruningEverything,
		}
	case PruningNothing:
		return PruningOptions{
			KeepRecent: 0,
			Interval:   0,
			Strategy:   PruningNothing,
		}
	default:
		return PruningOptions{
			Strategy: PruningCustom,
		}
	}
}
// NewCustomPruningOptions returns PruningOptions with the PruningCustom
// strategy and the given keep-recent and interval parameters.
func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions {
	return PruningOptions{
		KeepRecent: keepRecent,
		Interval:   interval,
		Strategy:   PruningCustom,
	}
}
// GetPruningStrategy returns the pruning strategy of the options.
func (po PruningOptions) GetPruningStrategy() PruningStrategy {
	return po.Strategy
}
// Validate checks that the pruning options are well-formed. PruningNothing is
// always valid; any other strategy requires Interval >= pruneEverythingInterval
// and KeepRecent >= pruneEverythingKeepRecent.
func (po PruningOptions) Validate() error {
	if po.Strategy == PruningNothing {
		return nil
	}
	if po.Interval == 0 {
		return ErrPruningIntervalZero
	}
	if po.Interval < pruneEverythingInterval {
		return ErrPruningIntervalTooSmall
	}
	if po.KeepRecent < pruneEverythingKeepRecent {
		return ErrPruningKeepRecentTooSmall
	}

	return nil
}
// NewPruningOptionsFromString returns the pruning options corresponding to
// the given strategy string. Unrecognized strategies — including "custom",
// which cannot be fully expressed by a bare string — fall back to the
// default strategy.
func NewPruningOptionsFromString(strategy string) PruningOptions {
	switch strategy {
	case PruningOptionEverything:
		return NewPruningOptions(PruningEverything)

	case PruningOptionNothing:
		return NewPruningOptions(PruningNothing)

	default:
		// PruningOptionDefault and anything unknown resolve to the default
		// options; the explicit "default" case was redundant with this branch.
		return NewPruningOptions(PruningDefault)
	}
}

View File

@ -1,65 +0,0 @@
package types
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestPruningOptions_Validate checks Validate for the built-in strategies and
// a spread of custom options around the enforced lower bounds.
func TestPruningOptions_Validate(t *testing.T) {
	testCases := []struct {
		opts      PruningOptions
		expectErr error
	}{
		{NewPruningOptions(PruningDefault), nil},
		{NewPruningOptions(PruningEverything), nil},
		{NewPruningOptions(PruningNothing), nil},
		{NewPruningOptions(PruningCustom), ErrPruningIntervalZero},
		{NewCustomPruningOptions(2, 10), nil},
		{NewCustomPruningOptions(100, 15), nil},
		{NewCustomPruningOptions(1, 10), ErrPruningKeepRecentTooSmall},
		{NewCustomPruningOptions(2, 9), ErrPruningIntervalTooSmall},
		// NOTE: the (2, 0) case appeared twice in the original table; the
		// duplicate row has been removed.
		{NewCustomPruningOptions(2, 0), ErrPruningIntervalZero},
	}

	for _, tc := range testCases {
		err := tc.opts.Validate()
		require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err)
	}
}
// TestPruningOptions_GetStrategy verifies GetPruningStrategy echoes the
// strategy that the options were constructed with.
func TestPruningOptions_GetStrategy(t *testing.T) {
	testCases := []struct {
		opts             PruningOptions
		expectedStrategy PruningStrategy
	}{
		{NewPruningOptions(PruningDefault), PruningDefault},
		{NewPruningOptions(PruningEverything), PruningEverything},
		{NewPruningOptions(PruningNothing), PruningNothing},
		{NewPruningOptions(PruningCustom), PruningCustom},
		{NewCustomPruningOptions(2, 10), PruningCustom},
	}

	for _, tc := range testCases {
		actualStrategy := tc.opts.GetPruningStrategy()
		require.Equal(t, tc.expectedStrategy, actualStrategy)
	}
}
// TestNewPruningOptionsFromString verifies string-to-options mapping,
// including the fallback to the default strategy for unknown strings.
func TestNewPruningOptionsFromString(t *testing.T) {
	testCases := []struct {
		optString string
		expect    PruningOptions
	}{
		{PruningOptionDefault, NewPruningOptions(PruningDefault)},
		{PruningOptionEverything, NewPruningOptions(PruningEverything)},
		{PruningOptionNothing, NewPruningOptions(PruningNothing)},
		{"invalid", NewPruningOptions(PruningDefault)},
	}

	for _, tc := range testCases {
		actual := NewPruningOptionsFromString(tc.optString)
		require.Equal(t, tc.expect, actual)
	}
}

View File

@ -1,29 +0,0 @@
package store
import (
"cosmossdk.io/store/types"
)
// Import cosmos-sdk/types/store.go for convenience.
// These aliases re-export the store types under this package so that callers
// can depend on a single import path.
type (
	Store            = types.Store
	Committer        = types.Committer
	CommitStore      = types.CommitStore
	MultiStore       = types.MultiStore
	CacheMultiStore  = types.CacheMultiStore
	CommitMultiStore = types.CommitMultiStore
	KVStore          = types.KVStore
	Iterator         = types.Iterator
	CacheKVStore     = types.CacheKVStore
	CommitKVStore    = types.CommitKVStore
	CacheWrapper     = types.CacheWrapper
	CacheWrap        = types.CacheWrap
	CommitID         = types.CommitID
	Key              = types.StoreKey
	Type             = types.StoreType
	Queryable        = types.Queryable
	TraceContext     = types.TraceContext
	Gas              = types.Gas
	GasMeter         = types.GasMeter
	GasConfig        = types.GasConfig
)

364
store/root/store.go Normal file
View File

@ -0,0 +1,364 @@
package root
import (
"bytes"
"fmt"
"io"
"slices"
"github.com/cockroachdb/errors"
ics23 "github.com/cosmos/ics23/go"
"cosmossdk.io/log"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/branchkv"
"cosmossdk.io/store/v2/commitment"
"cosmossdk.io/store/v2/tracekv"
)
// defaultStoreKey defines the default store key used for the single SC backend.
// Note, however, this store key is essentially irrelevant as it's not exposed
// to the user and is only needed to fulfill usage of StoreInfo during Commit.
const defaultStoreKey = "default"

// Compile-time assertion that *Store implements store.RootStore.
var _ store.RootStore = (*Store)(nil)
// Store defines the SDK's default RootStore implementation. It contains a single
// State Storage (SS) backend and a single State Commitment (SC) backend. Note,
// this means all store keys are ignored and commitments exist in a single commitment
// tree.
type Store struct {
	// logger is used for store-level debug logging (tagged module=root_store by New)
	logger log.Logger
	// initialVersion is the version this store starts from
	initialVersion uint64

	// stateStore reflects the state storage backend
	stateStore store.VersionedDatabase

	// stateCommitment reflects the state commitment (SC) backend
	stateCommitment *commitment.Database

	// rootKVStore reflects the root BranchedKVStore that is used to accumulate writes
	// and branch off of.
	rootKVStore store.BranchedKVStore

	// commitHeader reflects the header used when committing state (note, this isn't required and only used for query purposes)
	commitHeader store.CommitHeader

	// lastCommitInfo reflects the last version/hash that has been committed
	lastCommitInfo *store.CommitInfo

	// workingHash defines the current (yet to be committed) hash
	workingHash []byte

	// traceWriter defines a writer for store tracing operation
	traceWriter io.Writer

	// traceContext defines the tracing context, if any, for trace operations
	traceContext store.TraceContext
}
// New returns a RootStore backed by the provided state storage (SS) and state
// commitment (SC) backends. The root KVStore used to accumulate writes is
// branched off of the SS backend.
func New(
	logger log.Logger,
	initVersion uint64,
	ss store.VersionedDatabase,
	sc *commitment.Database,
) (store.RootStore, error) {
	rootKVStore, err := branchkv.New(defaultStoreKey, ss)
	if err != nil {
		return nil, err
	}

	return &Store{
		logger:          logger.With("module", "root_store"),
		initialVersion:  initVersion,
		stateStore:      ss,
		stateCommitment: sc,
		rootKVStore:     rootKVStore,
	}, nil
}
// Close closes the store and resets all internal fields. Note, Close() is NOT
// idempotent and should only be called once. The error, if any, combines the
// errors from closing the SS and SC backends.
func (s *Store) Close() error {
	// A single errors.Join replaces the original incremental joins onto a
	// named result that started out nil.
	err := errors.Join(s.stateStore.Close(), s.stateCommitment.Close())

	s.stateStore = nil
	s.stateCommitment = nil
	s.lastCommitInfo = nil
	s.commitHeader = nil

	return err
}
// MountSCStore performs a no-op as a SC backend must be provided at initialization.
// It always returns an error to signal that mounting is unsupported.
func (s *Store) MountSCStore(_ string, _ store.Tree) error {
	return errors.New("cannot mount SC store; SC must be provided on initialization")
}
// GetSCStore returns the store's state commitment (SC) backend. Note, the store
// key is ignored as there exists only a single SC tree.
func (s *Store) GetSCStore(_ string) store.Tree {
	return s.stateCommitment
}
// LoadLatestVersion loads the latest known version, as reported by
// GetLatestVersion, into the store.
func (s *Store) LoadLatestVersion() error {
	lv, err := s.GetLatestVersion()
	if err != nil {
		return err
	}

	return s.loadVersion(lv, nil)
}
// LastCommitID returns a CommitID based off of the latest internal CommitInfo.
// If an internal CommitInfo is not set, a new one will be returned with only the
// latest version set, which is based off of the SS view. An error is returned
// when the SS and SC backends disagree on the latest version.
func (s *Store) LastCommitID() (store.CommitID, error) {
	if s.lastCommitInfo != nil {
		return s.lastCommitInfo.CommitID(), nil
	}

	// XXX/TODO: We cannot use SS to get the latest version when lastCommitInfo
	// is nil if SS is flushed asynchronously. This is because the latest version
	// in SS might not be the latest version in the SC stores.
	//
	// Ref: https://github.com/cosmos/cosmos-sdk/issues/17314
	latestVersion, err := s.stateStore.GetLatestVersion()
	if err != nil {
		return store.CommitID{}, err
	}

	// sanity check: ensure integrity of latest version against SC
	scVersion := s.stateCommitment.GetLatestVersion()
	if scVersion != latestVersion {
		return store.CommitID{}, fmt.Errorf("SC and SS version mismatch; got: %d, expected: %d", scVersion, latestVersion)
	}

	return store.CommitID{Version: latestVersion}, nil
}
// GetLatestVersion returns the latest version based on the latest internal
// CommitInfo. An error is returned if the latest CommitInfo or version cannot
// be retrieved.
func (s *Store) GetLatestVersion() (uint64, error) {
	lastCommitID, err := s.LastCommitID()
	if err != nil {
		return 0, err
	}

	return lastCommitID.Version, nil
}
// GetProof delegates the GetProof to the store's underlying SC backend.
// The store key argument is ignored as there is a single SC tree.
func (s *Store) GetProof(_ string, version uint64, key []byte) (*ics23.CommitmentProof, error) {
	return s.stateCommitment.GetProof(version, key)
}
// LoadVersion loads a specific version returning an error upon failure.
// Upgrade handling is not yet implemented (see loadVersion).
//
// The original declared a named result parameter `err` that was never used;
// it has been dropped for clarity (the signature is otherwise unchanged).
func (s *Store) LoadVersion(v uint64) error {
	return s.loadVersion(v, nil)
}
// GetKVStore returns the store's root KVStore. Any writes to this store without
// branching will be committed to SC and SS upon Commit(). Branching will create
// a branched KVStore that allow writes to be discarded and propagated to the
// root KVStore using Write(). When tracing is enabled, the returned store is
// wrapped so operations are recorded to the trace writer.
func (s *Store) GetKVStore(_ string) store.KVStore {
	if !s.TracingEnabled() {
		return s.rootKVStore
	}

	return tracekv.New(s.rootKVStore, s.traceWriter, s.traceContext)
}
// GetBranchedKVStore returns the root KVStore as a BranchedKVStore, ignoring
// the store key. When tracing is enabled, the returned store is wrapped so
// operations are recorded to the trace writer.
func (s *Store) GetBranchedKVStore(_ string) store.BranchedKVStore {
	if !s.TracingEnabled() {
		return s.rootKVStore
	}

	return tracekv.New(s.rootKVStore, s.traceWriter, s.traceContext)
}
// loadVersion loads version v into the state commitment (SC) backend. The
// upgrades argument is currently unused; upgrade handling is not yet
// implemented.
func (s *Store) loadVersion(v uint64, upgrades any) error {
	s.logger.Debug("loading version", "version", v)

	// Note, the call below loads the SC backend, so the error message now
	// correctly references SC (the original message said "SS").
	if err := s.stateCommitment.LoadVersion(v); err != nil {
		return fmt.Errorf("failed to load SC version %d: %w", v, err)
	}

	// TODO: Complete this method to handle upgrades. See legacy RMS loadVersion()
	// for reference.
	//
	// Ref: https://github.com/cosmos/cosmos-sdk/issues/17314
	return nil
}
// SetTracingContext sets the tracing context, i.e. metadata that is attached
// to traced operations.
func (s *Store) SetTracingContext(tc store.TraceContext) {
	s.traceContext = tc
}

// SetTracer sets the tracing writer. Setting a non-nil writer enables tracing
// (see TracingEnabled).
func (s *Store) SetTracer(w io.Writer) {
	s.traceWriter = w
}

// TracingEnabled reports whether tracing is enabled, i.e. whether a tracing
// writer has been set.
func (s *Store) TracingEnabled() bool {
	return s.traceWriter != nil
}

// SetCommitHeader sets the commit header. At Commit() time the header is used
// to stamp the CommitInfo timestamp and to sanity-check the commit height.
func (s *Store) SetCommitHeader(h store.CommitHeader) {
	s.commitHeader = h
}
// Branch returns a copy of the Store whose underlying root KVStore is branched.
// Any call to GetKVStore and GetBranchedKVStore on the copy returns the
// branched KVStore; Write() propagates accumulated writes back to the parent.
func (s *Store) Branch() store.BranchedRootStore {
	branchedKVStore := s.rootKVStore.Branch()

	// Note, the copy deliberately omits workingHash: the branched store has
	// not computed one.
	return &Store{
		logger:          s.logger,
		initialVersion:  s.initialVersion,
		stateStore:      s.stateStore,
		stateCommitment: s.stateCommitment,
		rootKVStore:     branchedKVStore,
		commitHeader:    s.commitHeader,
		lastCommitInfo:  s.lastCommitInfo,
		traceWriter:     s.traceWriter,
		traceContext:    s.traceContext,
	}
}
// WorkingHash returns the working hash of the root store. Note, WorkingHash()
// should only be called once per block once all writes are complete and prior
// to Commit() being called.
//
// On the first call in a block, the working hash is computed by writing the
// current changeset as a batch to the SC tree (via writeSC), which also sets
// the internal CommitInfo; subsequent calls return the cached value. A copy of
// the hash is returned so callers cannot mutate internal state.
func (s *Store) WorkingHash() ([]byte, error) {
	if s.workingHash != nil {
		return slices.Clone(s.workingHash), nil
	}

	if err := s.writeSC(); err != nil {
		return nil, err
	}

	s.workingHash = s.lastCommitInfo.Hash()
	return slices.Clone(s.workingHash), nil
}
// Write delegates to the root KVStore's Write. For a branched root store (see
// Branch), this propagates the accumulated writes to the parent store.
func (s *Store) Write() {
	s.rootKVStore.Write()
}
// Commit commits all state changes to the underlying SS and SC backends. Note,
// at the time of Commit(), we expect WorkingHash() to have already been called,
// which internally sets the working hash, retrieved by writing a batch of the
// changeset to the SC tree, and CommitInfo on the root store. The changeset is
// retrieved from the rootKVStore and represents the entire set of writes to be
// committed. The same changeset is used to flush writes to the SS backend.
//
// Note, Commit() commits SS and SC synchronously.
func (s *Store) Commit() ([]byte, error) {
	// WorkingHash() must have been called first; it sets both workingHash and
	// lastCommitInfo. (errors.New is used as the message has no format verbs.)
	if s.workingHash == nil {
		return nil, errors.New("working hash is nil; must call WorkingHash() before Commit()")
	}

	version := s.lastCommitInfo.Version

	// A height/version mismatch is logged but deliberately not treated as fatal.
	if s.commitHeader != nil && s.commitHeader.GetHeight() != version {
		s.logger.Debug("commit header and version mismatch", "header_height", s.commitHeader.GetHeight(), "version", version)
	}

	changeset := s.rootKVStore.GetChangeset()

	// commit SS
	if err := s.stateStore.ApplyChangeset(version, changeset); err != nil {
		return nil, fmt.Errorf("failed to commit SS: %w", err)
	}

	// commit SC
	if err := s.commitSC(); err != nil {
		return nil, fmt.Errorf("failed to commit SC stores: %w", err)
	}

	if s.commitHeader != nil {
		s.lastCommitInfo.Timestamp = s.commitHeader.GetTime()
	}

	// reset the root KVStore so the next block starts from an empty changeset
	if err := s.rootKVStore.Reset(); err != nil {
		return nil, fmt.Errorf("failed to reset root KVStore: %w", err)
	}

	s.workingHash = nil

	return s.lastCommitInfo.Hash(), nil
}
// writeSC gets the current changeset from the rootKVStore and writes that as a
// batch to the underlying SC tree, which allows us to retrieve the working hash
// of the SC tree. Finally, we construct a *CommitInfo (carrying the SC working
// hash) and set it on the store; only an error is returned.
// Note, this should only be called once per block!
func (s *Store) writeSC() error {
	changeSet := s.rootKVStore.GetChangeset()
	if err := s.stateCommitment.WriteBatch(changeSet); err != nil {
		return fmt.Errorf("failed to write batch to SC store: %w", err)
	}
	var previousHeight, version uint64
	if s.lastCommitInfo.GetVersion() == 0 && s.initialVersion > 1 {
		// This case means that no commit has been made in the store, we
		// start from initialVersion.
		version = s.initialVersion
	} else {
		// This case can mean two things:
		//
		// 1. There was already a previous commit in the store, in which case we
		// increment the version from there.
		// 2. There was no previous commit, and initial version was not set, in which
		// case we start at version 1.
		previousHeight = s.lastCommitInfo.GetVersion()
		version = previousHeight + 1
	}
	workingHash := s.stateCommitment.WorkingHash()
	// Record the pending commit under a single default store entry; the hash
	// is the SC tree's working hash for this version.
	s.lastCommitInfo = &store.CommitInfo{
		Version: version,
		StoreInfos: []store.StoreInfo{
			{
				Name: defaultStoreKey,
				CommitID: store.CommitID{
					Version: version,
					Hash: workingHash,
				},
			},
		},
	}
	return nil
}
// commitSC commits the SC store. At this point, a batch of the current changeset
// should have already been written to the SC via WorkingHash(). This method
// solely commits that batch. An error is returned if commit fails or if the
// resulting commit hash is not equivalent to the working hash.
func (s *Store) commitSC() error {
	commitBz, err := s.stateCommitment.Commit()
	if err != nil {
		return fmt.Errorf("failed to commit SC store: %w", err)
	}

	workingHash, err := s.WorkingHash()
	if err != nil {
		return fmt.Errorf("failed to get working hash: %w", err)
	}

	// The committed hash must match the previously computed working hash; an
	// error is returned only on a mismatch. (The condition was previously
	// inverted, erroring exactly when the hashes agreed.)
	if !bytes.Equal(commitBz, workingHash) {
		return fmt.Errorf("unexpected commit hash; got: %X, expected: %X", commitBz, workingHash)
	}

	return nil
}

220
store/root/store_test.go Normal file
View File

@ -0,0 +1,220 @@
package root
import (
"fmt"
"io"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/suite"
"cosmossdk.io/log"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/commitment"
"cosmossdk.io/store/v2/commitment/iavl"
"cosmossdk.io/store/v2/storage/sqlite"
)
// RootStoreTestSuite exercises a store.RootStore backed by a SQLite SS backend
// and an in-memory IAVL SC backend (see SetupTest).
type RootStoreTestSuite struct {
	suite.Suite
	rootStore store.RootStore
}

// TestStorageTestSuite runs RootStoreTestSuite.
//
// NOTE(review): the name suggests a storage suite, but it runs the root store
// suite; renaming would change the `go test -run` name, so it is noted only.
func TestStorageTestSuite(t *testing.T) {
	suite.Run(t, &RootStoreTestSuite{})
}
// SetupTest builds a fresh root store before each test: a SQLite-backed SS
// store in a temp dir, an in-memory IAVL tree as the SC backend, and tracing
// enabled (writer: io.Discard) with a per-test trace context.
func (s *RootStoreTestSuite) SetupTest() {
	noopLog := log.NewNopLogger()
	ss, err := sqlite.New(s.T().TempDir())
	s.Require().NoError(err)
	tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig())
	sc := commitment.NewDatabase(tree)
	// initial version 1
	rs, err := New(noopLog, 1, ss, sc)
	s.Require().NoError(err)
	rs.SetTracer(io.Discard)
	rs.SetTracingContext(store.TraceContext{
		"test": s.T().Name(),
	})
	s.rootStore = rs
}

// TearDownTest closes the root store after each test.
func (s *RootStoreTestSuite) TearDownTest() {
	err := s.rootStore.Close()
	s.Require().NoError(err)
}
// TestMountSCStore verifies that mounting an SC store after construction is
// rejected.
func (s *RootStoreTestSuite) TestMountSCStore() {
	s.Require().Error(s.rootStore.MountSCStore("", nil))
}

// TestGetSCStore verifies the SC backend is returned regardless of store key.
func (s *RootStoreTestSuite) TestGetSCStore() {
	s.Require().Equal(s.rootStore.GetSCStore(""), s.rootStore.(*Store).stateCommitment)
}

// TestGetKVStore verifies a non-nil KVStore is returned.
func (s *RootStoreTestSuite) TestGetKVStore() {
	kvs := s.rootStore.GetKVStore("")
	s.Require().NotNil(kvs)
}

// TestGetBranchedKVStore verifies a non-nil branched KVStore with an empty
// initial changeset is returned.
func (s *RootStoreTestSuite) TestGetBranchedKVStore() {
	bs := s.rootStore.GetBranchedKVStore("")
	s.Require().NotNil(bs)
	s.Require().Empty(bs.GetChangeset().Pairs)
}
// TestGetProof verifies proofs are unavailable before any commit, and that
// after committing a key/value pair an existence proof for it can be obtained.
func (s *RootStoreTestSuite) TestGetProof() {
	// no version committed yet, so proof retrieval must fail
	p, err := s.rootStore.GetProof("", 1, []byte("foo"))
	s.Require().Error(err)
	s.Require().Nil(p)
	// write and commit a changeset
	bs := s.rootStore.GetBranchedKVStore("")
	bs.Set([]byte("foo"), []byte("bar"))
	workingHash, err := s.rootStore.WorkingHash()
	s.Require().NoError(err)
	s.Require().NotNil(workingHash)
	commitHash, err := s.rootStore.Commit()
	s.Require().NoError(err)
	s.Require().NotNil(commitHash)
	s.Require().Equal(workingHash, commitHash)
	// ensure the proof is non-nil for the corresponding version
	p, err = s.rootStore.GetProof("", 1, []byte("foo"))
	s.Require().NoError(err)
	s.Require().NotNil(p)
	s.Require().Equal([]byte("foo"), p.GetExist().Key)
	s.Require().Equal([]byte("bar"), p.GetExist().Value)
}
// TestBranch verifies that a branched root store reads through to its parent,
// isolates its writes until Write() is called, and then propagates them.
func (s *RootStoreTestSuite) TestBranch() {
	// write and commit a changeset
	bs := s.rootStore.GetKVStore("")
	bs.Set([]byte("foo"), []byte("bar"))
	workingHash, err := s.rootStore.WorkingHash()
	s.Require().NoError(err)
	s.Require().NotNil(workingHash)
	commitHash, err := s.rootStore.Commit()
	s.Require().NoError(err)
	s.Require().NotNil(commitHash)
	s.Require().Equal(workingHash, commitHash)
	// branch the root store
	rs2 := s.rootStore.Branch()
	// ensure we can perform reads which pass through to the original root store
	bs2 := rs2.GetKVStore("")
	s.Require().Equal([]byte("bar"), bs2.Get([]byte("foo")))
	// make a change to the branched root store
	bs2.Set([]byte("foo"), []byte("updated_bar"))
	// ensure the original root store is not modified
	s.Require().Equal([]byte("bar"), bs.Get([]byte("foo")))
	// write changes
	rs2.Write()
	// ensure changes are reflected in the original root store
	s.Require().Equal([]byte("updated_bar"), bs.Get([]byte("foo")))
}
// TestMultiBranch verifies that multiple independent branches can be created
// from the same root store and that writing one branch back behaves the same
// as the single-branch case.
func (s *RootStoreTestSuite) TestMultiBranch() {
	// write and commit a changeset
	bs := s.rootStore.GetKVStore("")
	bs.Set([]byte("foo"), []byte("bar"))
	workingHash, err := s.rootStore.WorkingHash()
	s.Require().NoError(err)
	s.Require().NotNil(workingHash)
	commitHash, err := s.rootStore.Commit()
	s.Require().NoError(err)
	s.Require().NotNil(commitHash)
	s.Require().Equal(workingHash, commitHash)
	// create multiple branches of the root store
	var branchedRootStores []store.BranchedRootStore
	for i := 0; i < 5; i++ {
		branchedRootStores = append(branchedRootStores, s.rootStore.Branch())
	}
	// get the last branched root store
	rs2 := branchedRootStores[4]
	// ensure we can perform reads which pass through to the original root store
	bs2 := rs2.GetKVStore("")
	s.Require().Equal([]byte("bar"), bs2.Get([]byte("foo")))
	// make a change to the branched root store
	bs2.Set([]byte("foo"), []byte("updated_bar"))
	// ensure the original root store is not modified
	s.Require().Equal([]byte("bar"), bs.Get([]byte("foo")))
	// write changes
	rs2.Write()
	// ensure changes are reflected in the original root store
	s.Require().Equal([]byte("updated_bar"), bs.Get([]byte("foo")))
}
// TestCommit exercises the full WorkingHash/Commit cycle: Commit without a
// prior WorkingHash must error; after a successful commit the latest version
// advances, the root changeset is cleared, and all writes are readable.
func (s *RootStoreTestSuite) TestCommit() {
	lv, err := s.rootStore.GetLatestVersion()
	s.Require().NoError(err)
	s.Require().Zero(lv)
	// branch the root store
	rs2 := s.rootStore.Branch()
	// perform changes
	bs2 := rs2.GetKVStore("")
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099
		bs2.Set([]byte(key), []byte(val))
	}
	// write to the branched root store, which will flush to the parent root store
	rs2.Write()
	// committing w/o calling WorkingHash should error
	_, err = s.rootStore.Commit()
	s.Require().Error(err)
	// execute WorkingHash and Commit
	wHash, err := s.rootStore.WorkingHash()
	s.Require().NoError(err)
	cHash, err := s.rootStore.Commit()
	s.Require().NoError(err)
	s.Require().Equal(wHash, cHash)
	// ensure latest version is updated
	lv, err = s.rootStore.GetLatestVersion()
	s.Require().NoError(err)
	s.Require().Equal(uint64(1), lv)
	// ensure the root KVStore is cleared
	s.Require().Empty(s.rootStore.(*Store).rootKVStore.GetChangeset().Pairs)
	// perform reads on the updated root store
	bs := s.rootStore.GetKVStore("")
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099
		val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099
		s.Require().Equal([]byte(val), bs.Get([]byte(key)))
	}
}

View File

@ -1,49 +0,0 @@
package rootmulti
import (
"cosmossdk.io/store/dbadapter"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
// commithash is a fixed placeholder returned by all commit-related methods;
// this adapter never computes a real hash.
var commithash = []byte("FAKE_HASH")

// compile-time interface conformance checks
var (
	_ types.KVStore = (*commitDBStoreAdapter)(nil)
	_ types.Committer = (*commitDBStoreAdapter)(nil)
)

//----------------------------------------
// commitDBStoreWrapper should only be used for simulation/debugging,
// as it doesn't compute any commit hash, and it cannot load older state.

// Wrapper type for dbm.Db with implementation of KVStore
type commitDBStoreAdapter struct {
	dbadapter.Store
}

// Commit returns a fake CommitID (version -1, fixed hash); no real commit is
// performed beyond writes already applied to the embedded DB adapter.
func (cdsa commitDBStoreAdapter) Commit() types.CommitID {
	return types.CommitID{
		Version: -1,
		Hash: commithash,
	}
}

// LastCommitID mirrors Commit: a fake CommitID with version -1.
func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID {
	return types.CommitID{
		Version: -1,
		Hash: commithash,
	}
}

// WorkingHash returns the fixed placeholder hash.
func (cdsa commitDBStoreAdapter) WorkingHash() []byte {
	return commithash
}

// SetPruning is a no-op; pruning does not apply to this adapter.
func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {}

// GetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions {
	return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
}

View File

@ -1,27 +0,0 @@
package rootmulti
import (
"github.com/cometbft/cometbft/crypto/merkle"
storetypes "cosmossdk.io/store/types"
)
// RequireProof returns whether proof is required for the subpath.
func RequireProof(subpath string) bool {
	// XXX: create a better convention.
	// Currently, only when query subpath is "/key", will proof be included in
	// response. If there are some changes about proof building in iavlstore.go,
	// we must change code here to keep consistency with iavlStore#Query.
	return subpath == "/key"
}

//-----------------------------------------------------------------------------

// DefaultProofRuntime returns a proof runtime with the IAVL and simple-merkle
// commitment op decoders registered.
//
// XXX: This should be managed by the rootMultiStore which may want to register
// more proof ops?
func DefaultProofRuntime() (prt *merkle.ProofRuntime) {
	prt = merkle.NewProofRuntime()
	prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder)
	prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder)
	return
}

View File

@ -1,152 +0,0 @@
package rootmulti
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/types"
)
// TestVerifyIAVLStoreQueryProof commits a single key to an IAVL store, queries
// it with proof, and checks the proof verifies for the correct key/value and
// fails for several malformed variants.
func TestVerifyIAVLStoreQueryProof(t *testing.T) {
	// Create main tree for testing.
	db := dbm.NewMemDB()
	iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	store := iStore.(*iavl.Store)
	require.Nil(t, err)
	store.Set([]byte("MYKEY"), []byte("MYVALUE"))
	cid := store.Commit()
	// Get Proof
	res, err := store.Query(&types.RequestQuery{
		Path: "/key", // required path to get key/value+proof
		Data: []byte("MYKEY"),
		Prove: true,
	})
	require.NoError(t, err)
	require.NotNil(t, res.ProofOps)
	// Verify proof.
	prt := DefaultProofRuntime()
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE"))
	require.Nil(t, err)
	// Verify (bad) proof: wrong key.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY_NOT", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: nested path.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY/MYKEY", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: missing leading slash.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "MYKEY", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: wrong value.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE_NOT"))
	require.NotNil(t, err)
	// Verify (bad) proof: nil value.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte(nil))
	require.NotNil(t, err)
}
// TestVerifyMultiStoreQueryProof commits a key through a multi-store-mounted
// IAVL store and checks the store-prefixed proof path verifies correctly,
// while malformed paths and values fail.
func TestVerifyMultiStoreQueryProof(t *testing.T) {
	// Create main tree for testing.
	db := dbm.NewMemDB()
	store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	iavlStoreKey := types.NewKVStoreKey("iavlStoreKey")
	store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil)
	require.NoError(t, store.LoadVersion(0))
	iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store)
	iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
	cid := store.Commit()
	// Get Proof
	res, err := store.Query(&types.RequestQuery{
		Path: "/iavlStoreKey/key", // required path to get key/value+proof
		Data: []byte("MYKEY"),
		Prove: true,
	})
	require.NoError(t, err)
	require.NotNil(t, res.ProofOps)
	// Verify proof.
	prt := DefaultProofRuntime()
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE"))
	require.Nil(t, err)
	// Verify proof (verification is repeatable on the same proof ops).
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE"))
	require.Nil(t, err)
	// Verify (bad) proof: wrong key.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY_NOT", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: extra path component.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY/MYKEY", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: missing leading slash.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "iavlStoreKey/MYKEY", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: missing store prefix.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE"))
	require.NotNil(t, err)
	// Verify (bad) proof: wrong value.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE_NOT"))
	require.NotNil(t, err)
	// Verify (bad) proof: nil value.
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte(nil))
	require.NotNil(t, err)
}
// TestVerifyMultiStoreQueryProofAbsence queries a key that was never written
// and verifies the returned absence proof, plus two invalid verifications.
func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) {
	// Create main tree for testing.
	db := dbm.NewMemDB()
	store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	iavlStoreKey := types.NewKVStoreKey("iavlStoreKey")
	store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil)
	err := store.LoadVersion(0)
	require.NoError(t, err)
	iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store)
	iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
	cid := store.Commit() // commit the store (MYKEY is present; MYABSENTKEY below is not)
	// Get Proof
	res, err := store.Query(&types.RequestQuery{
		Path: "/iavlStoreKey/key", // required path to get key/value+proof
		Data: []byte("MYABSENTKEY"),
		Prove: true,
	})
	require.NoError(t, err)
	require.NotNil(t, res.ProofOps)
	// Verify proof.
	prt := DefaultProofRuntime()
	err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY")
	require.Nil(t, err)
	// Verify (bad) proof: missing store prefix.
	prt = DefaultProofRuntime()
	err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/MYABSENTKEY")
	require.NotNil(t, err)
	// Verify (bad) proof: a value proof for an absent key must fail.
	prt = DefaultProofRuntime()
	err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY", []byte(""))
	require.NotNil(t, err)
}

View File

@ -1,321 +0,0 @@
package rootmulti_test
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/rootmulti"
"cosmossdk.io/store/snapshots"
snapshottypes "cosmossdk.io/store/snapshots/types"
"cosmossdk.io/store/types"
)
// newMultiStoreWithGeneratedData builds a committed multi-store with the given
// number of IAVL stores, each filled with storeKeys entries of deterministic
// (fixed-seed) pseudo-random 1 KiB values keyed by big-endian uint64 indices.
func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *rootmulti.Store {
	multiStore := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests
	keys := []*types.KVStoreKey{}
	for i := uint8(0); i < stores; i++ {
		key := types.NewKVStoreKey(fmt.Sprintf("store%v", i))
		multiStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
		keys = append(keys, key)
	}
	err := multiStore.LoadLatestVersion()
	if err != nil {
		panic(err)
	}
	for _, key := range keys {
		store := multiStore.GetCommitKVStore(key).(*iavl.Store)
		for i := uint64(0); i < storeKeys; i++ {
			k := make([]byte, 8)
			v := make([]byte, 1024)
			binary.BigEndian.PutUint64(k, i)
			_, err := r.Read(v)
			if err != nil {
				panic(err)
			}
			store.Set(k, v)
		}
	}
	multiStore.Commit()
	// reload so the returned store reflects the committed state
	err = multiStore.LoadLatestVersion()
	if err != nil {
		panic(err)
	}
	return multiStore
}
// newMultiStoreWithMixedMounts builds a loaded multi-store with three IAVL
// stores and one transient store mounted.
func newMultiStoreWithMixedMounts(db dbm.DB) *rootmulti.Store {
	store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil)
	store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil)
	store.MountStoreWithDB(types.NewKVStoreKey("iavl3"), types.StoreTypeIAVL, nil)
	store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil)
	if err := store.LoadLatestVersion(); err != nil {
		panic(err)
	}
	return store
}

// newMultiStoreWithMixedMountsAndBasicData builds on the mixed-mount store and
// performs three commits with writes (and one delete) spread across the IAVL
// and transient stores, ending at version 3.
func newMultiStoreWithMixedMountsAndBasicData(db dbm.DB) *rootmulti.Store {
	store := newMultiStoreWithMixedMounts(db)
	store1 := store.GetStoreByName("iavl1").(types.CommitKVStore)
	store2 := store.GetStoreByName("iavl2").(types.CommitKVStore)
	trans1 := store.GetStoreByName("trans1").(types.KVStore)
	store1.Set([]byte("a"), []byte{1})
	store1.Set([]byte("b"), []byte{1})
	store2.Set([]byte("X"), []byte{255})
	store2.Set([]byte("A"), []byte{101})
	trans1.Set([]byte("x1"), []byte{91})
	store.Commit()
	store1.Set([]byte("b"), []byte{2})
	store1.Set([]byte("c"), []byte{3})
	store2.Set([]byte("B"), []byte{102})
	store.Commit()
	store2.Set([]byte("C"), []byte{103})
	store2.Delete([]byte("X"))
	trans1.Set([]byte("x2"), []byte{92})
	store.Commit()
	return store
}
// assertStoresEqual asserts that two stores share the same last commit ID and
// hold identical key/value contents.
func assertStoresEqual(t *testing.T, expect, actual types.CommitKVStore, msgAndArgs ...interface{}) {
	t.Helper()
	assert.Equal(t, expect.LastCommitID(), actual.LastCommitID())
	expectIter := expect.Iterator(nil, nil)
	expectMap := map[string][]byte{}
	for ; expectIter.Valid(); expectIter.Next() {
		expectMap[string(expectIter.Key())] = expectIter.Value()
	}
	require.NoError(t, expectIter.Error())
	// Iterate the ACTUAL store here. The original iterated `expect` a second
	// time, which made the final map comparison vacuously true.
	actualIter := actual.Iterator(nil, nil)
	actualMap := map[string][]byte{}
	for ; actualIter.Valid(); actualIter.Next() {
		actualMap[string(actualIter.Key())] = actualIter.Value()
	}
	require.NoError(t, actualIter.Error())
	assert.Equal(t, expectMap, actualMap, msgAndArgs...)
}
// TestMultistoreSnapshot_Checksum snapshots a deterministic multi-store and
// pins the SHA-256 of each emitted chunk against golden values.
func TestMultistoreSnapshot_Checksum(t *testing.T) {
	// Chunks from different nodes must fit together, so all nodes must produce identical chunks.
	// This checksum test makes sure that the byte stream remains identical. If the test fails
	// without having changed the data (e.g. because the Protobuf or zlib encoding changes),
	// snapshottypes.CurrentFormat must be bumped.
	store := newMultiStoreWithGeneratedData(dbm.NewMemDB(), 5, 10000)
	version := uint64(store.LastCommitID().Version)
	testcases := []struct {
		format uint32
		chunkHashes []string
	}{
		{1, []string{
			"503e5b51b657055b77e88169fadae543619368744ad15f1de0736c0a20482f24",
			"e1a0daaa738eeb43e778aefd2805e3dd720798288a410b06da4b8459c4d8f72e",
			"aa048b4ee0f484965d7b3b06822cf0772cdcaad02f3b1b9055e69f2cb365ef3c",
			"7921eaa3ed4921341e504d9308a9877986a879fe216a099c86e8db66fcba4c63",
			"a4a864e6c02c9fca5837ec80dc84f650b25276ed7e4820cf7516ced9f9901b86",
			"980925390cc50f14998ecb1e87de719ca9dd7e72f5fefbe445397bf670f36c31",
		}},
	}
	for _, tc := range testcases {
		tc := tc // capture range variable for the closure (pre-Go 1.22 semantics)
		t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) {
			// the snapshot is produced on a goroutine and streamed through ch
			ch := make(chan io.ReadCloser)
			go func() {
				streamWriter := snapshots.NewStreamWriter(ch)
				defer streamWriter.Close()
				require.NotNil(t, streamWriter)
				err := store.Snapshot(version, streamWriter)
				require.NoError(t, err)
			}()
			hashes := []string{}
			hasher := sha256.New()
			for chunk := range ch {
				hasher.Reset()
				_, err := io.Copy(hasher, chunk)
				require.NoError(t, err)
				hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil)))
			}
			assert.Equal(t, tc.chunkHashes, hashes,
				"Snapshot output for format %v has changed", tc.format)
		})
	}
}
// TestMultistoreSnapshot_Errors verifies snapshotting invalid heights (zero
// and unknown) returns an error.
func TestMultistoreSnapshot_Errors(t *testing.T) {
	store := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
	testcases := map[string]struct {
		height uint64
		expectType error
	}{
		"0 height": {0, nil},
		"unknown height": {9, nil},
	}
	for name, tc := range testcases {
		tc := tc // capture range variable for the closure (pre-Go 1.22 semantics)
		t.Run(name, func(t *testing.T) {
			err := store.Snapshot(tc.height, nil)
			require.Error(t, err)
			// expectType is nil for all current cases; the check is a hook for
			// future sentinel-error assertions.
			if tc.expectType != nil {
				assert.True(t, errors.Is(err, tc.expectType))
			}
		})
	}
}
// TestMultistoreSnapshotRestore snapshots a populated source store (appending
// an extension metadata item), restores it into an empty target, and asserts
// commit IDs and per-store contents match; transient stores stay empty.
func TestMultistoreSnapshotRestore(t *testing.T) {
	source := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
	target := newMultiStoreWithMixedMounts(dbm.NewMemDB())
	version := uint64(source.LastCommitID().Version)
	require.EqualValues(t, 3, version)
	dummyExtensionItem := snapshottypes.SnapshotItem{
		Item: &snapshottypes.SnapshotItem_Extension{
			Extension: &snapshottypes.SnapshotExtensionMeta{
				Name: "test",
				Format: 1,
			},
		},
	}
	chunks := make(chan io.ReadCloser, 100)
	go func() {
		streamWriter := snapshots.NewStreamWriter(chunks)
		require.NotNil(t, streamWriter)
		defer streamWriter.Close()
		err := source.Snapshot(version, streamWriter)
		require.NoError(t, err)
		// write an extension metadata
		err = streamWriter.WriteMsg(&dummyExtensionItem)
		require.NoError(t, err)
	}()
	streamReader, err := snapshots.NewStreamReader(chunks)
	require.NoError(t, err)
	// Restore should consume the store data and hand back the extension item.
	nextItem, err := target.Restore(version, snapshottypes.CurrentFormat, streamReader)
	require.NoError(t, err)
	require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension())
	assert.Equal(t, source.LastCommitID(), target.LastCommitID())
	for _, key := range source.StoreKeysByName() {
		sourceStore := source.GetStoreByName(key.Name()).(types.CommitKVStore)
		targetStore := target.GetStoreByName(key.Name()).(types.CommitKVStore)
		switch sourceStore.GetStoreType() {
		case types.StoreTypeTransient:
			// transient stores are not part of snapshots
			assert.False(t, targetStore.Iterator(nil, nil).Valid(),
				"transient store %v not empty", key.Name())
		default:
			assertStoresEqual(t, sourceStore, targetStore, "store %q not equal", key.Name())
		}
	}
}
// benchmarkMultistoreSnapshot measures producing (and discarding) a snapshot
// stream from a generated multi-store. Currently skipped (noisy/slow setup).
func benchmarkMultistoreSnapshot(b *testing.B, stores uint8, storeKeys uint64) {
	b.Helper()
	b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
	b.ReportAllocs()
	b.StopTimer()
	source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys)
	version := source.LastCommitID().Version
	require.EqualValues(b, 1, version)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())
		for _, key := range source.StoreKeysByName() {
			target.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
		}
		err := target.LoadLatestVersion()
		require.NoError(b, err)
		require.EqualValues(b, 0, target.LastCommitID().Version)
		chunks := make(chan io.ReadCloser)
		go func() {
			streamWriter := snapshots.NewStreamWriter(chunks)
			require.NotNil(b, streamWriter)
			err := source.Snapshot(uint64(version), streamWriter)
			require.NoError(b, err)
		}()
		// drain the stream; the benchmark measures production, not restore
		for reader := range chunks {
			_, err := io.Copy(io.Discard, reader)
			require.NoError(b, err)
			err = reader.Close()
			require.NoError(b, err)
		}
	}
}

// benchmarkMultistoreSnapshotRestore measures a full snapshot+restore round
// trip into a fresh store. Currently skipped (noisy/slow setup).
func benchmarkMultistoreSnapshotRestore(b *testing.B, stores uint8, storeKeys uint64) {
	b.Helper()
	b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
	b.ReportAllocs()
	b.StopTimer()
	source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys)
	version := uint64(source.LastCommitID().Version)
	require.EqualValues(b, 1, version)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())
		for _, key := range source.StoreKeysByName() {
			target.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
		}
		err := target.LoadLatestVersion()
		require.NoError(b, err)
		require.EqualValues(b, 0, target.LastCommitID().Version)
		chunks := make(chan io.ReadCloser)
		go func() {
			writer := snapshots.NewStreamWriter(chunks)
			require.NotNil(b, writer)
			err := source.Snapshot(version, writer)
			require.NoError(b, err)
		}()
		reader, err := snapshots.NewStreamReader(chunks)
		require.NoError(b, err)
		_, err = target.Restore(version, snapshottypes.CurrentFormat, reader)
		require.NoError(b, err)
		require.Equal(b, source.LastCommitID(), target.LastCommitID())
	}
}

// BenchmarkMultistoreSnapshot100K: 10 stores x 10k keys.
func BenchmarkMultistoreSnapshot100K(b *testing.B) {
	benchmarkMultistoreSnapshot(b, 10, 10000)
}

// BenchmarkMultistoreSnapshot1M: 10 stores x 100k keys.
func BenchmarkMultistoreSnapshot1M(b *testing.B) {
	benchmarkMultistoreSnapshot(b, 10, 100000)
}

// BenchmarkMultistoreSnapshotRestore100K: 10 stores x 10k keys.
func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) {
	benchmarkMultistoreSnapshotRestore(b, 10, 10000)
}

// BenchmarkMultistoreSnapshotRestore1M: 10 stores x 100k keys.
func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) {
	benchmarkMultistoreSnapshotRestore(b, 10, 100000)
}

File diff suppressed because it is too large Load Diff

View File

@ -1,983 +0,0 @@
package rootmulti
import (
"bytes"
"fmt"
"testing"
"time"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/cachemulti"
"cosmossdk.io/store/iavl"
sdkmaps "cosmossdk.io/store/internal/maps"
"cosmossdk.io/store/metrics"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
// TestStoreType verifies an IAVL store can be mounted with an explicit DB.
func TestStoreType(t *testing.T) {
	db := dbm.NewMemDB()
	store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, db)
}

// TestGetCommitKVStore verifies both GetCommitKVStore and GetCommitStore
// return the mounted IAVL store for the same key.
func TestGetCommitKVStore(t *testing.T) {
	var db dbm.DB = dbm.NewMemDB()
	ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault))
	err := ms.LoadLatestVersion()
	require.Nil(t, err)
	key := ms.keysByName["store1"]
	store1 := ms.GetCommitKVStore(key)
	require.NotNil(t, store1)
	require.IsType(t, &iavl.Store{}, store1)
	store2 := ms.GetCommitStore(key)
	require.NotNil(t, store2)
	require.IsType(t, &iavl.Store{}, store2)
}

// TestStoreMount verifies mounting distinct keys succeeds while remounting the
// same key, a nil key, or a duplicate key name panics.
func TestStoreMount(t *testing.T) {
	db := dbm.NewMemDB()
	store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	key1 := types.NewKVStoreKey("store1")
	key2 := types.NewKVStoreKey("store2")
	dup1 := types.NewKVStoreKey("store1")
	require.NotPanics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) })
	require.NotPanics(t, func() { store.MountStoreWithDB(key2, types.StoreTypeIAVL, db) })
	require.Panics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) })
	require.Panics(t, func() { store.MountStoreWithDB(nil, types.StoreTypeIAVL, db) })
	require.Panics(t, func() { store.MountStoreWithDB(dup1, types.StoreTypeIAVL, db) })
}
// TestCacheMultiStore verifies CacheMultiStore returns a cachemulti.Store.
func TestCacheMultiStore(t *testing.T) {
	var db dbm.DB = dbm.NewMemDB()
	ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	cacheMulti := ms.CacheMultiStore()
	require.IsType(t, cachemulti.Store{}, cacheMulti)
}

// TestCacheMultiStoreWithVersion verifies versioned cache-multistores: invalid
// versions error, valid versions serve reads, versions predating newly added
// stores still load, and writes to a cache-versioned store panic.
func TestCacheMultiStoreWithVersion(t *testing.T) {
	var db dbm.DB = dbm.NewMemDB()
	ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	err := ms.LoadLatestVersion()
	require.Nil(t, err)
	commitID := types.CommitID{}
	checkStore(t, ms, commitID, commitID)
	k, v := []byte("wind"), []byte("blows")
	store1 := ms.GetStoreByName("store1").(types.KVStore)
	store1.Set(k, v)
	cID := ms.Commit()
	require.Equal(t, int64(1), cID.Version)
	// require no failure when given an invalid or pruned version
	_, err = ms.CacheMultiStoreWithVersion(cID.Version + 1)
	require.Error(t, err)
	// require a valid version can be cache-loaded
	cms, err := ms.CacheMultiStoreWithVersion(cID.Version)
	require.NoError(t, err)
	// require a valid key lookup yields the correct value
	kvStore := cms.GetKVStore(ms.keysByName["store1"])
	require.NotNil(t, kvStore)
	require.Equal(t, kvStore.Get(k), v)
	// add new module stores (store4 and store5) to multi stores and commit
	ms.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil)
	ms.MountStoreWithDB(types.NewKVStoreKey("store5"), types.StoreTypeIAVL, nil)
	err = ms.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{Added: []string{"store4", "store5"}})
	require.NoError(t, err)
	ms.Commit()
	// cache multistore of version before adding store4 should works
	_, err = ms.CacheMultiStoreWithVersion(1)
	require.NoError(t, err)
	// require we cannot commit (write) to a cache-versioned multi-store
	require.Panics(t, func() {
		kvStore.Set(k, []byte("newValue"))
		cms.Write()
	})
}
// TestHashStableWithEmptyCommit verifies that an empty commit advances the
// version but leaves the app hash unchanged, and that WorkingHash matches the
// subsequent commit hash.
func TestHashStableWithEmptyCommit(t *testing.T) {
	var db dbm.DB = dbm.NewMemDB()
	ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	err := ms.LoadLatestVersion()
	require.Nil(t, err)
	commitID := types.CommitID{}
	checkStore(t, ms, commitID, commitID)
	k, v := []byte("wind"), []byte("blows")
	store1 := ms.GetStoreByName("store1").(types.KVStore)
	store1.Set(k, v)
	workingHash := ms.WorkingHash()
	cID := ms.Commit()
	require.Equal(t, int64(1), cID.Version)
	hash := cID.Hash
	require.Equal(t, workingHash, hash)
	// make an empty commit, it should update version, but not affect hash
	workingHash = ms.WorkingHash()
	cID = ms.Commit()
	require.Equal(t, workingHash, cID.Hash)
	require.Equal(t, int64(2), cID.Version)
	require.Equal(t, hash, cID.Hash)
}
// TestMultistoreCommitLoad commits several versions, reloads the multi-store
// from the same backing DB (both latest and an older version), and verifies
// the reported commit IDs match expectations at each step.
func TestMultistoreCommitLoad(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := store.LoadLatestVersion()
require.Nil(t, err)
// New store has empty last commit.
commitID := types.CommitID{}
checkStore(t, store, commitID, commitID)
// Make sure we can get stores by name.
s1 := store.GetStoreByName("store1")
require.NotNil(t, s1)
s3 := store.GetStoreByName("store3")
require.NotNil(t, s3)
// an unmounted name yields nil
s77 := store.GetStoreByName("store77")
require.Nil(t, s77)
// Make a few commits and check them.
nCommits := int64(3)
for i := int64(0); i < nCommits; i++ {
workingHash := store.WorkingHash()
commitID = store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, i+1)
checkStore(t, store, expectedCommitID, commitID)
}
// Load the latest multistore again and check version.
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadLatestVersion()
require.Nil(t, err)
commitID = getExpectedCommitID(store, nCommits)
checkStore(t, store, commitID, commitID)
// Commit and check version.
workingHash := store.WorkingHash()
commitID = store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, nCommits+1)
checkStore(t, store, expectedCommitID, commitID)
// Load an older multistore and check version.
ver := nCommits - 1
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadVersion(ver)
require.Nil(t, err)
commitID = getExpectedCommitID(store, ver)
checkStore(t, store, commitID, commitID)
}
// TestMultistoreLoadWithUpgrade exercises LoadLatestVersionAndUpgrade with a
// StoreUpgrades spec that adds store4, renames store2 to restore2, and deletes
// store3, then verifies the migrated state both immediately and after a plain
// reload without migrations.
func TestMultistoreLoadWithUpgrade(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := store.LoadLatestVersion()
require.Nil(t, err)
// write some data in all stores
k1, v1 := []byte("first"), []byte("store")
s1, _ := store.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, s1)
s1.Set(k1, v1)
k2, v2 := []byte("second"), []byte("restore")
s2, _ := store.GetStoreByName("store2").(types.KVStore)
require.NotNil(t, s2)
s2.Set(k2, v2)
k3, v3 := []byte("third"), []byte("dropped")
s3, _ := store.GetStoreByName("store3").(types.KVStore)
require.NotNil(t, s3)
s3.Set(k3, v3)
// store4 is not mounted yet
s4, _ := store.GetStoreByName("store4").(types.KVStore)
require.Nil(t, s4)
// do one commit
workingHash := store.WorkingHash()
commitID := store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, 1)
checkStore(t, store, expectedCommitID, commitID)
ci, err := store.GetCommitInfo(1)
require.NoError(t, err)
require.Equal(t, int64(1), ci.Version)
require.Equal(t, 3, len(ci.StoreInfos))
checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"})
// Load without changes and make sure it is sensible
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadLatestVersion()
require.Nil(t, err)
commitID = getExpectedCommitID(store, 1)
checkStore(t, store, commitID, commitID)
// let's query data to see it was saved properly
s2, _ = store.GetStoreByName("store2").(types.KVStore)
require.NotNil(t, s2)
require.Equal(t, v2, s2.Get(k2))
// now, let's load with upgrades...
restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = restore.LoadLatestVersionAndUpgrade(upgrades)
require.Nil(t, err)
// s1 was not changed
s1, _ = restore.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, s1)
require.Equal(t, v1, s1.Get(k1))
// store3 is still mounted, but its deleted data is gone
s3, _ = restore.GetStoreByName("store3").(types.KVStore)
require.NotNil(t, s3)
require.Nil(t, s3.Get(k3)) // data was deleted
// store4 is mounted, with empty data
s4, _ = restore.GetStoreByName("store4").(types.KVStore)
require.NotNil(t, s4)
iterator := s4.Iterator(nil, nil)
values := 0
for ; iterator.Valid(); iterator.Next() {
values++
}
require.Zero(t, values)
require.NoError(t, iterator.Close())
// write something inside store4
k4, v4 := []byte("fourth"), []byte("created")
s4.Set(k4, v4)
// store2 is no longer mounted
st2 := restore.GetStoreByName("store2")
require.Nil(t, st2)
// restore2 has the old data (renamed from store2)
rs2, _ := restore.GetStoreByName("restore2").(types.KVStore)
require.NotNil(t, rs2)
require.Equal(t, v2, rs2.Get(k2))
// store this migrated data, and load it again without migrations
migratedID := restore.Commit()
require.Equal(t, migratedID.Version, int64(2))
reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
// unmount store3 since store3 was deleted
unmountStore(reload, "store3")
rs3, _ := reload.GetStoreByName("store3").(types.KVStore)
require.Nil(t, rs3)
err = reload.LoadLatestVersion()
require.Nil(t, err)
require.Equal(t, migratedID, reload.LastCommitID())
// query this new store
rl1, _ := reload.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, rl1)
require.Equal(t, v1, rl1.Get(k1))
rl2, _ := reload.GetStoreByName("restore2").(types.KVStore)
require.NotNil(t, rl2)
require.Equal(t, v2, rl2.Get(k2))
rl4, _ := reload.GetStoreByName("store4").(types.KVStore)
require.NotNil(t, rl4)
require.Equal(t, v4, rl4.Get(k4))
// check commitInfo in storage
ci, err = reload.GetCommitInfo(2)
require.NoError(t, err)
require.Equal(t, int64(2), ci.Version)
require.Equal(t, 3, len(ci.StoreInfos), ci.StoreInfos)
checkContains(t, ci.StoreInfos, []string{"store1", "restore2", "store4"})
}
// TestParsePath checks parsePath's error handling and its splitting of a
// query path into a store name and the remaining subpath.
func TestParsePath(t *testing.T) {
	// a path must begin with a leading slash
	_, _, err := parsePath("foo")
	require.Error(t, err)

	storeName, remainder, err := parsePath("/foo")
	require.NoError(t, err)
	require.Equal(t, "foo", storeName)
	require.Equal(t, "", remainder)

	storeName, remainder, err = parsePath("/fizz/bang/baz")
	require.NoError(t, err)
	require.Equal(t, "fizz", storeName)
	require.Equal(t, "/bang/baz", remainder)

	// the remainder itself parses as another store/subpath pair
	inner, innerPath, err := parsePath(remainder)
	require.NoError(t, err)
	require.Equal(t, "bang", inner)
	require.Equal(t, "/baz", innerPath)
}
// TestMultiStoreRestart commits several versions under a custom pruning
// configuration, then reloads the multi-store from the same DB and verifies
// that the latest flushed values for all stores survive the restart.
func TestMultiStoreRestart(t *testing.T) {
db := dbm.NewMemDB()
pruning := pruningtypes.NewCustomPruningOptions(2, 1)
multi := newMultiStoreWithMounts(db, pruning)
err := multi.LoadLatestVersion()
require.Nil(t, err)
initCid := multi.LastCommitID()
k, v := "wind", "blows"
k2, v2 := "water", "flows"
k3, v3 := "fire", "burns"
// commit versions 1 and 2 with writes to all three stores
for i := 1; i < 3; i++ {
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, i)))
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, i)))
// ... and another.
store3 := multi.GetStoreByName("store3").(types.KVStore)
store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, i)))
multi.Commit()
cinfo, err := multi.GetCommitInfo(int64(i))
require.NoError(t, err)
require.Equal(t, int64(i), cinfo.Version)
}
// version 3: only store1 and store2 are updated
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, 3)))
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, 3)))
multi.Commit()
flushedCinfo, err := multi.GetCommitInfo(3)
require.Nil(t, err)
require.NotEqual(t, initCid, flushedCinfo, "CID is different after flush to disk")
// version 4: only store3 is updated
store3 := multi.GetStoreByName("store3").(types.KVStore)
store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, 3)))
multi.Commit()
postFlushCinfo, err := multi.GetCommitInfo(4)
require.NoError(t, err)
require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit")
// "restart": rebuild the multi-store on the same backing DB
multi = newMultiStoreWithMounts(db, pruning)
err = multi.LoadLatestVersion()
require.Nil(t, err)
reloadedCid := multi.LastCommitID()
require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID")
// Check that store1 and store2 retained data from 3rd commit
store1 = multi.GetStoreByName("store1").(types.KVStore)
val := store1.Get([]byte(k))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v, 3)), val, "Reloaded value not the same as last flushed value")
store2 = multi.GetStoreByName("store2").(types.KVStore)
val2 := store2.Get([]byte(k2))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v2, 3)), val2, "Reloaded value not the same as last flushed value")
// Check that store3 still has data from last commit even though update happened on 2nd commit
store3 = multi.GetStoreByName("store3").(types.KVStore)
val3 := store3.Get([]byte(k3))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value")
}
// TestMultiStoreQuery commits data to two stores, reloads the multi-store, and
// exercises the ABCI Query path: malformed paths, unknown store names, valid
// lookups with data, and valid-but-empty lookups (with proof requested).
func TestMultiStoreQuery(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := multi.LoadLatestVersion()
require.Nil(t, err)
k, v := []byte("wind"), []byte("blows")
k2, v2 := []byte("water"), []byte("flows")
// v3 := []byte("is cold")
// Commit the multistore.
_ = multi.Commit()
// Make sure we can get by name.
garbage := multi.GetStoreByName("bad-name")
require.Nil(t, garbage)
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set(k, v)
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set(k2, v2)
// Commit the multistore.
cid := multi.Commit()
ver := cid.Version
// Reload multistore from database
multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = multi.LoadLatestVersion()
require.Nil(t, err)
// Test bad path: missing the store-name segment.
query := types.RequestQuery{Path: "/key", Data: k, Height: ver}
_, err = multi.Query(&query)
codespace, code, _ := errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
// Test bad path: no leading slash at all.
query.Path = "h897fy32890rf63296r92"
_, err = multi.Query(&query)
codespace, code, _ = errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
// Test invalid store name.
query.Path = "/garbage/key"
_, err = multi.Query(&query)
codespace, code, _ = errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
// Test valid query with data.
query.Path = "/store1/key"
qres, err := multi.Query(&query)
require.NoError(t, err)
require.Equal(t, v, qres.Value)
// Test valid but empty query (k was never written to store2).
query.Path = "/store2/key"
query.Prove = true
qres, err = multi.Query(&query)
require.NoError(t, err)
require.Nil(t, qres.Value)
// Test store2 data.
// Since we are using the request as a reference, the path will be modified.
query.Data = k2
query.Path = "/store2/key"
qres, err = multi.Query(&query)
require.NoError(t, err)
require.Equal(t, v2, qres.Value)
}
// TestMultiStore_Pruning runs a table of pruning configurations, commits the
// requested number of versions, and checks which historical versions are
// still cache-loadable (saved) versus pruned away (deleted).
func TestMultiStore_Pruning(t *testing.T) {
	testCases := []struct {
		name        string
		numVersions int64
		po          pruningtypes.PruningOptions
		deleted     []int64
		saved       []int64
	}{
		{"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
		{"prune everything", 12, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7}, []int64{8, 9, 10, 11, 12}},
		{"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 3, 4, 6, 5, 7}, []int64{8, 9, 10}},
		{"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 3, 4, 5, 6}, []int64{7, 8, 9, 10}},
		{"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			memDB := dbm.NewMemDB()
			multiStore := newMultiStoreWithMounts(memDB, tc.po)
			require.NoError(t, multiStore.LoadLatestVersion())

			// commit empty blocks up to the requested height
			for commits := int64(0); commits < tc.numVersions; commits++ {
				multiStore.Commit()
			}

			// surviving versions load cleanly ...
			for _, ver := range tc.saved {
				_, err := multiStore.CacheMultiStoreWithVersion(ver)
				require.NoError(t, err, "expected no error when loading height: %d", ver)
			}

			// ... while pruned versions error out
			for _, ver := range tc.deleted {
				_, err := multiStore.CacheMultiStoreWithVersion(ver)
				require.Error(t, err, "expected error when loading height: %d", ver)
			}
		})
	}
}
// TestMultiStore_Pruning_SameHeightsTwice checks that re-pruning heights that
// were already pruned (after reloading snapshot heights from disk) does not
// panic and that subsequent commits keep advancing the version.
func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) {
const (
numVersions int64 = 10
keepRecent uint64 = 2
interval uint64 = 10
)
db := dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval))
require.NoError(t, ms.LoadLatestVersion())
var lastCommitInfo types.CommitID
for i := int64(0); i < numVersions; i++ {
lastCommitInfo = ms.Commit()
}
require.Equal(t, numVersions, lastCommitInfo.Version)
// heights older than keepRecent are pruned and must not load
for v := int64(1); v < numVersions-int64(keepRecent); v++ {
err := ms.LoadVersion(v)
require.Error(t, err, "expected error when loading pruned height: %d", v)
}
// the keepRecent most recent heights must still load
for v := (numVersions - int64(keepRecent)); v < numVersions; v++ {
err := ms.LoadVersion(v)
require.NoError(t, err, "expected no error when loading height: %d", v)
}
// Get latest
err := ms.LoadVersion(numVersions - 1)
require.NoError(t, err)
// Ensure already pruned snapshot heights were loaded
require.NoError(t, ms.pruningManager.LoadSnapshotHeights(db))
// Test pruning the same heights again
lastCommitInfo = ms.Commit()
require.Equal(t, numVersions, lastCommitInfo.Version)
// Ensure that can commit one more height with no panic
lastCommitInfo = ms.Commit()
require.Equal(t, numVersions+1, lastCommitInfo.Version)
}
// TestMultiStore_PruningRestart verifies that pending prune heights survive a
// restart: after reloading, the next commit past the pruning interval prunes
// the accumulated heights, which then fail to cache-load.
func TestMultiStore_PruningRestart(t *testing.T) {
db := dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
require.NoError(t, ms.LoadLatestVersion())
// Commit enough to build up heights to prune, where on the next block we should
// batch delete.
for i := int64(0); i < 10; i++ {
ms.Commit()
}
// nothing pruned yet: the interval (11) has not elapsed
actualHeightToPrune := ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(0), actualHeightToPrune)
// "restart"
ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
err := ms.LoadLatestVersion()
require.NoError(t, err)
actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(0), actualHeightToPrune)
// commit one more block and ensure the heights have been pruned
ms.Commit()
actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(8), actualHeightToPrune)
for v := int64(1); v <= actualHeightToPrune; v++ {
_, err := ms.CacheMultiStoreWithVersion(v)
require.Error(t, err, "expected error when loading height: %d", v)
}
}
// TestUnevenStoresHeightCheck tests that loading the root store errors when a
// module store is mounted at the wrong height, and that the same load succeeds
// once the new store is declared via a StoreUpgrades "Added" entry.
func TestUnevenStoresHeightCheck(t *testing.T) {
	memDB := dbm.NewMemDB()
	rootStore := newMultiStoreWithMounts(memDB, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	require.Nil(t, rootStore.LoadLatestVersion())

	// commit to increment the root store's height
	rootStore.Commit()

	// mount a brand-new store4 (height 0) alongside the height-1 stores
	rootStore.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil)

	// loading without declaring the upgrade must fail on the height mismatch
	require.Error(t, rootStore.LoadLatestVersion())

	// declaring store4 as newly added makes the load succeed
	require.Nil(t, rootStore.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{
		Added: []string{"store4"},
	}))
}
// TestSetInitialVersion checks that SetInitialVersion makes the first commit
// land on the requested version and that the underlying IAVL store records it.
func TestSetInitialVersion(t *testing.T) {
	memDB := dbm.NewMemDB()
	rootStore := newMultiStoreWithMounts(memDB, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	require.NoError(t, rootStore.LoadLatestVersion())

	require.NoError(t, rootStore.SetInitialVersion(5))
	require.Equal(t, int64(5), rootStore.initialVersion)

	// the very first commit jumps straight to version 5
	rootStore.Commit()
	require.Equal(t, int64(5), rootStore.LastCommitID().Version)

	// the underlying IAVL store must also know about version 5
	committed := rootStore.GetCommitKVStore(rootStore.keysByName["store1"])
	iavlStore, ok := committed.(*iavl.Store)
	require.True(t, ok)
	require.True(t, iavlStore.VersionExists(5))
}
// TestAddListenersAndListeningEnabled verifies that listening is reported only
// for store keys explicitly registered via AddListeners.
func TestAddListenersAndListeningEnabled(t *testing.T) {
	memDB := dbm.NewMemDB()
	rootStore := newMultiStoreWithMounts(memDB, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))

	observedKey := types.NewKVStoreKey("listening_test_key")
	otherKey := types.NewKVStoreKey("wrong_listening_test_key")

	// nothing registered yet
	require.False(t, rootStore.ListeningEnabled(observedKey))

	rootStore.AddListeners([]types.StoreKey{observedKey})

	// only the registered key reports listening
	require.False(t, rootStore.ListeningEnabled(otherKey))
	require.True(t, rootStore.ListeningEnabled(observedKey))
}
func TestCacheWraps(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
cacheWrapper := multi.CacheWrap()
require.IsType(t, cachemulti.Store{}, cacheWrapper)
cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil)
require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace)
}
// TestTraceConcurrency is a smoke test that concurrently writes through a
// cache multi-store while the tracing context is being replaced, to surface
// data races (run with -race); it asserts nothing beyond not crashing.
func TestTraceConcurrency(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := multi.LoadLatestVersion()
require.NoError(t, err)
b := &bytes.Buffer{}
key := multi.keysByName["store1"]
tc := types.TraceContext(map[string]interface{}{"blockHeight": 64})
multi.SetTracer(b)
multi.SetTracingContext(tc)
cms := multi.CacheMultiStore()
store1 := cms.GetKVStore(key)
cw := store1.CacheWrapWithTrace(b, tc)
_ = cw
require.NotNil(t, store1)
stop := make(chan struct{})
stopW := make(chan struct{})
// writer goroutine: hammers Set/Write through the traced store
go func(stop chan struct{}) {
for {
select {
case <-stop:
return
default:
store1.Set([]byte{1}, []byte{1})
cms.Write()
}
}
}(stop)
// tracer goroutine: repeatedly swaps the tracing context in parallel
go func(stop chan struct{}) {
for {
select {
case <-stop:
return
default:
multi.SetTracingContext(tc)
}
}
}(stopW)
// let the goroutines race for a while, then shut them down
time.Sleep(3 * time.Second)
stop <- struct{}{}
stopW <- struct{}{}
}
// TestCommitOrdered writes to all three stores, commits once, and checks that
// the resulting commit info lists the stores in name order (store1..store3).
func TestCommitOrdered(t *testing.T) {
	var db dbm.DB = dbm.NewMemDB()
	multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
	require.Nil(t, multi.LoadLatestVersion())

	// a fresh store reports the zero-value commit ID
	zeroID := types.CommitID{}
	checkStore(t, multi, zeroID, zeroID)

	// one key/value pair per store, written via a table
	pairs := []struct{ key, val []byte }{
		{[]byte("wind"), []byte("blows")},
		{[]byte("water"), []byte("flows")},
		{[]byte("fire"), []byte("burns")},
	}
	for i, p := range pairs {
		kv := multi.GetStoreByName(fmt.Sprintf("store%d", i+1)).(types.KVStore)
		kv.Set(p.key, p.val)
	}

	committed := multi.Commit()
	require.Equal(t, int64(1), committed.Version)

	info, err := multi.GetCommitInfo(1)
	require.NoError(t, err)
	require.Equal(t, int64(1), info.Version)
	require.Equal(t, 3, len(info.StoreInfos))

	// store infos must appear sorted by store name
	for i, si := range info.StoreInfos {
		require.Equal(t, si.Name, fmt.Sprintf("store%d", i+1))
	}
}
//-----------------------------------------------------------------------
// utils

// Store keys shared by the tests and helpers in this file; they correspond to
// the mounts created by newMultiStoreWithMounts.
var (
testStoreKey1 = types.NewKVStoreKey("store1")
testStoreKey2 = types.NewKVStoreKey("store2")
testStoreKey3 = types.NewKVStoreKey("store3")
)
// newMultiStoreWithMounts returns a root Store backed by db with the given
// pruning options and the three standard test stores mounted as IAVL stores.
func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store {
	rootStore := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	rootStore.SetPruning(pruningOpts)
	for _, key := range []types.StoreKey{testStoreKey1, testStoreKey2, testStoreKey3} {
		rootStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
	}
	return rootStore
}
// newMultiStoreWithModifiedMounts returns a root Store with a post-upgrade
// layout (store2 renamed to restore2, store3 to be deleted, store4 added),
// together with the StoreUpgrades describing that migration.
func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) {
	rootStore := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
	rootStore.SetPruning(pruningOpts)

	for _, name := range []string{"store1", "restore2", "store3", "store4"} {
		rootStore.MountStoreWithDB(types.NewKVStoreKey(name), types.StoreTypeIAVL, nil)
	}

	upgrades := &types.StoreUpgrades{
		Added: []string{"store4"},
		Renamed: []types.StoreRename{{
			OldKey: "store2",
			NewKey: "restore2",
		}},
		Deleted: []string{"store3"},
	}

	return rootStore, upgrades
}
// unmountStore removes the named store from rootStore's internal bookkeeping
// maps, effectively un-mounting it.
func unmountStore(rootStore *Store, storeKeyName string) {
	key := rootStore.keysByName[storeKeyName]
	delete(rootStore.storesParams, key)
	delete(rootStore.stores, key)
	delete(rootStore.keysByName, storeKeyName)
}
// checkStore asserts that got equals expect and that the store's last commit
// ID also equals expect.
func checkStore(t *testing.T, store *Store, expect, got types.CommitID) {
	t.Helper()

	for _, actual := range []types.CommitID{got, store.LastCommitID()} {
		require.Equal(t, expect, actual)
	}
}
// checkContains fails the test unless every name in wanted appears in info.
func checkContains(tb testing.TB, info []types.StoreInfo, wanted []string) {
	tb.Helper()

	for i := range wanted {
		checkHas(tb, info, wanted[i])
	}
}
// checkHas fails the test unless a StoreInfo named want is present in info.
func checkHas(tb testing.TB, info []types.StoreInfo, want string) {
	tb.Helper()

	found := false
	for _, si := range info {
		if si.Name == want {
			found = true
			break
		}
	}
	if !found {
		tb.Fatalf("storeInfo doesn't contain %s", want)
	}
}
// getExpectedCommitID builds the commit ID the store is expected to report at
// version ver, hashing the current state of all mounted stores.
func getExpectedCommitID(store *Store, ver int64) types.CommitID {
return types.CommitID{
Version: ver,
Hash: hashStores(store.stores),
}
}
// hashStores computes the multi-store app hash: each store contributes the
// hash of a StoreInfo built from its name and last commit ID, and the
// resulting name-to-hash map is merkle-ized.
func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte {
	infoHashes := make(map[string][]byte, len(stores))
	for storeKey, kvStore := range stores {
		storeName := storeKey.Name()
		info := types.StoreInfo{
			Name:     storeName,
			CommitId: kvStore.LastCommitID(),
		}
		infoHashes[storeName] = info.GetHash()
	}
	return sdkmaps.HashFromMap(infoHashes)
}
// MockListener is a test double for a store write listener; it accumulates
// every observed write into stateCache for later inspection.
type MockListener struct {
stateCache []types.StoreKVPair
}
// OnWrite records a single observed write (or deletion) in the listener's
// state cache and never fails. The last parameter is renamed from `delete`
// to `deleted` to avoid shadowing Go's builtin delete; parameter names do
// not affect interface satisfaction, so callers are unaffected.
func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, deleted bool) error {
	tl.stateCache = append(tl.stateCache, types.StoreKVPair{
		StoreKey: storeKey.Name(),
		Key:      key,
		Value:    value,
		Delete:   deleted,
	})
	return nil
}
// TestStateListeners verifies that writes through a cache multi-store are
// reported to registered listeners only on Write(), and only for store keys
// that were registered via AddListeners.
func TestStateListeners(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
require.Empty(t, ms.listeners)
ms.AddListeners([]types.StoreKey{testStoreKey1})
require.Equal(t, 1, len(ms.listeners))
require.NoError(t, ms.LoadLatestVersion())
cacheMulti := ms.CacheMultiStore()
store := cacheMulti.GetKVStore(testStoreKey1)
store.Set([]byte{1}, []byte{1})
// nothing observed yet: the cache store has not been written back
require.Empty(t, ms.PopStateCache())
// writes are observed when cache store commit.
cacheMulti.Write()
require.Equal(t, 1, len(ms.PopStateCache()))
// test no listening on unobserved store
store = cacheMulti.GetKVStore(testStoreKey2)
store.Set([]byte{1}, []byte{1})
require.Empty(t, ms.PopStateCache())
// writes are not observed when cache store commit
cacheMulti.Write()
require.Empty(t, ms.PopStateCache())
}
// commitKVStoreStub wraps a CommitKVStore and counts how many times Commit
// has been called, letting tests assert commit behavior during upgrades.
type commitKVStoreStub struct {
types.CommitKVStore
Committed int
}
// Commit delegates to the embedded CommitKVStore and increments the counter
// so tests can observe how many commits actually happened.
func (stub *commitKVStoreStub) Commit() types.CommitID {
commitID := stub.CommitKVStore.Commit()
stub.Committed++
return commitID
}
// prepareStoreMap builds a store map with two IAVL stores and one transient
// store, each wrapped in a commitKVStoreStub so tests can count commits.
func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) {
var db dbm.DB = dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil)
if err := store.LoadLatestVersion(); err != nil {
return nil, err
}
return map[types.StoreKey]types.CommitKVStore{
testStoreKey1: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore),
},
testStoreKey2: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore),
},
testStoreKey3: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore),
},
}, nil
}
// TestCommitStores checks that commitStores does not re-commit a store that is
// already at (or ahead of) the target version, protecting against double
// commits when an upgrade was interrupted partway through.
//
// Fixes the misspelled table field `exptectCommit` -> `expectCommit` (the
// field is local to this test, so nothing outside the block is affected).
func TestCommitStores(t *testing.T) {
	testCases := []struct {
		name         string
		committed    int // commits applied to store1 before commitStores runs
		expectCommit int // additional commits expected from commitStores
	}{
		{
			"when upgrade not get interrupted",
			0,
			1,
		},
		{
			"when upgrade get interrupted once",
			1,
			0,
		},
		{
			"when upgrade get interrupted twice",
			2,
			0,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			storeMap, err := prepareStoreMap()
			require.NoError(t, err)
			store := storeMap[testStoreKey1].(*commitKVStoreStub)
			// simulate a partially-completed upgrade by pre-committing
			for i := tc.committed; i > 0; i-- {
				store.Commit()
			}
			// reset the counter so only commits made by commitStores are counted
			store.Committed = 0
			var version int64 = 1
			removalMap := map[types.StoreKey]bool{}
			res := commitStores(version, storeMap, removalMap)
			for _, s := range res.StoreInfos {
				require.Equal(t, version, s.CommitId.Version)
			}
			require.Equal(t, version, res.Version)
			require.Equal(t, tc.expectCommit, store.Committed)
		})
	}
}

View File

@ -5,8 +5,8 @@ import (
"math"
"cosmossdk.io/errors"
snapshottypes "cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
"cosmossdk.io/store/v2"
snapshottypes "cosmossdk.io/store/v2/snapshots/types"
)
// ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a
@ -72,7 +72,7 @@ func (w *ChunkWriter) CloseWithError(err error) {
// Write implements io.Writer.
func (w *ChunkWriter) Write(data []byte) (int, error) {
if w.closed {
return 0, errors.Wrap(storetypes.ErrLogic, "cannot write to closed ChunkWriter")
return 0, errors.Wrap(store.ErrLogic, "cannot write to closed ChunkWriter")
}
nTotal := 0
for len(data) > 0 {
@ -174,7 +174,7 @@ func ValidRestoreHeight(format uint32, height uint64) error {
}
if height == 0 {
return errors.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
return errors.Wrap(store.ErrLogic, "cannot restore snapshot at height 0")
}
if height > uint64(math.MaxInt64) {
return errors.Wrapf(snapshottypes.ErrInvalidMetadata,

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/snapshots"
"cosmossdk.io/store/v2/snapshots"
)
func TestChunkWriter(t *testing.T) {

View File

@ -17,9 +17,8 @@ import (
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/snapshots"
snapshottypes "cosmossdk.io/store/snapshots/types"
"cosmossdk.io/store/types"
"cosmossdk.io/store/v2/snapshots"
snapshottypes "cosmossdk.io/store/v2/snapshots/types"
)
func checksums(slice [][]byte) [][]byte {
@ -302,7 +301,7 @@ func (s *extSnapshotter) SupportedFormats() []uint32 {
func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error {
for _, i := range s.state {
if err := payloadWriter(types.Uint64ToBigEndian(i)); err != nil {
if err := payloadWriter(snapshottypes.Uint64ToBigEndian(i)); err != nil {
return err
}
}
@ -317,7 +316,7 @@ func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadR
} else if err != nil {
return err
}
s.state = append(s.state, types.BigEndianToUint64(payload))
s.state = append(s.state, snapshottypes.BigEndianToUint64(payload))
}
// finalize restoration
return nil

View File

@ -13,8 +13,8 @@ import (
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/snapshots/types"
)
// Manager manages snapshot and restore operations for an app, making sure only a single
@ -112,10 +112,10 @@ func (m *Manager) begin(op operation) error {
// beginLocked begins an operation while already holding the mutex.
func (m *Manager) beginLocked(op operation) error {
if op == opNone {
return errorsmod.Wrap(storetypes.ErrLogic, "can't begin a none operation")
return errorsmod.Wrap(store.ErrLogic, "can't begin a none operation")
}
if m.operation != opNone {
return errorsmod.Wrapf(storetypes.ErrConflict, "a %v operation is in progress", m.operation)
return errorsmod.Wrapf(store.ErrConflict, "a %v operation is in progress", m.operation)
}
m.operation = op
return nil
@ -161,7 +161,7 @@ func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
// Create creates a snapshot and returns its metadata.
func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
if m == nil {
return nil, errorsmod.Wrap(storetypes.ErrLogic, "no snapshot store configured")
return nil, errorsmod.Wrap(store.ErrLogic, "no snapshot store configured")
}
defer m.multistore.PruneSnapshotHeight(int64(height))
@ -177,7 +177,7 @@ func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
}
if latest != nil && latest.Height >= height {
return nil, errorsmod.Wrapf(storetypes.ErrConflict,
return nil, errorsmod.Wrapf(store.ErrConflict,
"a more recent snapshot already exists at height %v", latest.Height)
}
@ -279,7 +279,7 @@ func (m *Manager) Restore(snapshot types.Snapshot) error {
return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format)
}
if snapshot.Height == 0 {
return errorsmod.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
return errorsmod.Wrap(store.ErrLogic, "cannot restore snapshot at height 0")
}
if snapshot.Height > uint64(math.MaxInt64) {
return errorsmod.Wrapf(types.ErrInvalidMetadata,
@ -375,11 +375,11 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
}
metadata := nextItem.GetExtension()
if metadata == nil {
return errorsmod.Wrapf(storetypes.ErrLogic, "unknown snapshot item %T", nextItem.Item)
return errorsmod.Wrapf(store.ErrLogic, "unknown snapshot item %T", nextItem.Item)
}
extension, ok := m.extensions[metadata.Name]
if !ok {
return errorsmod.Wrapf(storetypes.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
return errorsmod.Wrapf(store.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
}
if !IsFormatSupported(extension, metadata.Format) {
return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
@ -402,11 +402,11 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.operation != opRestore {
return false, errorsmod.Wrap(storetypes.ErrLogic, "no restore operation in progress")
return false, errorsmod.Wrap(store.ErrLogic, "no restore operation in progress")
}
if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
return false, errorsmod.Wrap(storetypes.ErrLogic, "received unexpected chunk")
return false, errorsmod.Wrap(store.ErrLogic, "received unexpected chunk")
}
// Check if any errors have occurred yet.
@ -416,7 +416,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
if done.err != nil {
return false, done.err
}
return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended unexpectedly")
return false, errorsmod.Wrap(store.ErrLogic, "restore ended unexpectedly")
default:
}
@ -452,7 +452,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
return false, done.err
}
if !done.complete {
return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended prematurely")
return false, errorsmod.Wrap(store.ErrLogic, "restore ended prematurely")
}
return true, nil

View File

@ -9,8 +9,8 @@ import (
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/snapshots"
"cosmossdk.io/store/snapshots/types"
"cosmossdk.io/store/v2/snapshots"
"cosmossdk.io/store/v2/snapshots/types"
)
var opts = types.NewSnapshotOptions(1500, 2)

View File

@ -15,8 +15,8 @@ import (
"github.com/cosmos/gogoproto/proto"
"cosmossdk.io/errors"
"cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
"cosmossdk.io/store/v2"
"cosmossdk.io/store/v2/snapshots/types"
)
const (
@ -36,7 +36,7 @@ type Store struct {
// NewStore creates a new snapshot store.
func NewStore(db db.DB, dir string) (*Store, error) {
if dir == "" {
return nil, errors.Wrap(storetypes.ErrLogic, "snapshot directory not given")
return nil, errors.Wrap(store.ErrLogic, "snapshot directory not given")
}
err := os.MkdirAll(dir, 0o755)
if err != nil {
@ -56,7 +56,7 @@ func (s *Store) Delete(height uint64, format uint32) error {
saving := s.saving[height]
s.mtx.Unlock()
if saving {
return errors.Wrapf(storetypes.ErrConflict,
return errors.Wrapf(store.ErrConflict,
"snapshot for height %v format %v is currently being saved", height, format)
}
err := s.db.DeleteSync(encodeKey(height, format))
@ -227,7 +227,7 @@ func (s *Store) Save(
) (*types.Snapshot, error) {
defer DrainChunks(chunks)
if height == 0 {
return nil, errors.Wrap(storetypes.ErrLogic, "snapshot height cannot be 0")
return nil, errors.Wrap(store.ErrLogic, "snapshot height cannot be 0")
}
s.mtx.Lock()
@ -235,7 +235,7 @@ func (s *Store) Save(
s.saving[height] = true
s.mtx.Unlock()
if saving {
return nil, errors.Wrapf(storetypes.ErrConflict,
return nil, errors.Wrapf(store.ErrConflict,
"a snapshot for height %v is already being saved", height)
}
defer func() {
@ -249,7 +249,7 @@ func (s *Store) Save(
return nil, err
}
if exists {
return nil, errors.Wrapf(storetypes.ErrConflict,
return nil, errors.Wrapf(store.ErrConflict,
"snapshot already exists for height %v format %v", height, format)
}
@ -349,11 +349,12 @@ func (s *Store) PathChunk(height uint64, format, chunk uint32) string {
// decodeKey decodes a snapshot key.
func decodeKey(k []byte) (uint64, uint32, error) {
if len(k) != 13 {
return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key with length %v", len(k))
return 0, 0, errors.Wrapf(store.ErrLogic, "invalid snapshot key with length %v", len(k))
}
if k[0] != keyPrefixSnapshot {
return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key prefix %x", k[0])
return 0, 0, errors.Wrapf(store.ErrLogic, "invalid snapshot key prefix %x", k[0])
}
height := binary.BigEndian.Uint64(k[1:9])
format := binary.BigEndian.Uint32(k[9:13])
return height, format, nil

Some files were not shown because too many files have changed in this diff Show More