diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index fee17ed54d..745f5588a1 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,6 +14,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: DeterminateSystems/nix-installer-action@main + - uses: DeterminateSystems/magic-nix-cache-action@main - uses: actions/setup-go@v4 with: go-version: "1.21" @@ -29,7 +31,7 @@ jobs: if: env.GIT_DIFF id: lint_long run: | - make lint + nix develop -c make lint - uses: technote-space/get-diff-action@v6.1.2 if: steps.lint_long.outcome == 'skipped' id: git_diff_all @@ -43,7 +45,7 @@ jobs: - name: run linting (short) if: steps.lint_long.outcome == 'skipped' && env.GIT_DIFF run: | - make lint + nix develop -c make lint env: GIT_DIFF: ${{ env.GIT_DIFF }} LINT_DIFF: 1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f0d6819d01..868a77a439 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -602,6 +602,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: DeterminateSystems/nix-installer-action@main + - uses: DeterminateSystems/magic-nix-cache-action@main - uses: actions/setup-go@v4 with: go-version: "1.20" @@ -619,7 +621,7 @@ jobs: if: env.GIT_DIFF run: | cd store - go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb_build' ./... + nix develop .. -c go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb' ./... - name: sonarcloud if: ${{ env.GIT_DIFF && !github.event.pull_request.draft && env.SONAR_TOKEN != null }} uses: SonarSource/sonarcloud-github-action@master @@ -790,9 +792,9 @@ jobs: SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} with: projectBaseDir: x/circuit/ - + test-x-protocolpool: - runs-on: ubuntu-latest + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 @@ -820,7 +822,7 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} with: - projectBaseDir: x/protocolpool/ + projectBaseDir: x/protocolpool/ test-x-feegrant: runs-on: ubuntu-latest diff --git a/.golangci.yml b/.golangci.yml index 6fb6c26934..b748314cd0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,6 +11,9 @@ run: - ".*\\.pb\\.gw\\.go$" - ".*\\.pulsar\\.go$" +build-tags: + - rocksdb + linters: disable-all: true enable: diff --git a/client/v2/go.mod b/client/v2/go.mod index 72c9e0983c..ee951d7d71 100644 --- a/client/v2/go.mod +++ b/client/v2/go.mod @@ -99,7 +99,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect diff --git a/client/v2/go.sum b/client/v2/go.sum index f1ce1effa5..6f43701079 100644 --- a/client/v2/go.sum +++ b/client/v2/go.sum @@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod 
h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/collections/go.mod b/collections/go.mod index 9ec6a608fa..39369b7e80 100644 --- a/collections/go.mod +++ b/collections/go.mod @@ -30,7 +30,7 @@ require ( github.com/klauspost/compress v1.16.5 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.7.16 // indirect + github.com/linxGnu/grocksdb v1.8.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/onsi/gomega v1.20.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/collections/go.sum b/collections/go.sum index 5dd6fb29fd..a09e26c2f5 100644 --- a/collections/go.sum +++ b/collections/go.sum @@ -77,8 +77,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8= -github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4= +github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= +github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= diff --git a/core/go.mod b/core/go.mod index 1506941be4..86068bf4a0 100644 --- a/core/go.mod +++ b/core/go.mod @@ -34,7 +34,7 @@ require ( github.com/klauspost/compress v1.16.5 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.7.16 // indirect + github.com/linxGnu/grocksdb v1.8.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/core/go.sum b/core/go.sum index c5a229fe23..cf126155b6 100644 --- a/core/go.sum +++ b/core/go.sum @@ -75,8 +75,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8= -github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4= +github.com/linxGnu/grocksdb v1.8.0 
h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= +github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= diff --git a/flake.nix b/flake.nix index 819c627ce4..ff63646857 100644 --- a/flake.nix +++ b/flake.nix @@ -11,8 +11,17 @@ outputs = { self, nixpkgs, gomod2nix, flake-utils }: { - overlays.default = pkgs: _: { - simd = pkgs.callPackage ./simapp { rev = self.shortRev or "dev"; }; + overlays.default = self: super: { + simd = self.callPackage ./simapp { rev = self.shortRev or "dev"; }; + rocksdb = super.rocksdb.overrideAttrs (_: rec { + version = "8.5.3"; + src = self.fetchFromGitHub { + owner = "facebook"; + repo = "rocksdb"; + rev = "v${version}"; + sha256 = "sha256-Qa4bAprXptA79ilNE5KSfggEDvNFHdrvDQ6SvzWMQus="; + }; + }); }; } // (flake-utils.lib.eachDefaultSystem diff --git a/go.mod b/go.mod index 883dbc36a9..42f39193a6 100644 --- a/go.mod +++ b/go.mod @@ -123,7 +123,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/highwayhash v1.0.2 // indirect diff --git a/go.sum b/go.sum index fecf6a42dc..5ff9fcb524 100644 --- a/go.sum +++ b/go.sum @@ -534,8 +534,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/orm/go.mod b/orm/go.mod index 929b98d6a6..a70d78db29 100644 --- a/orm/go.mod +++ b/orm/go.mod @@ -46,7 +46,7 @@ require ( github.com/klauspost/compress v1.16.5 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.7.16 // indirect + github.com/linxGnu/grocksdb v1.8.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/onsi/gomega v1.20.0 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/orm/go.sum b/orm/go.sum index dee104364b..a6964f867d 100644 --- a/orm/go.sum +++ b/orm/go.sum @@ -96,8 +96,8 @@ github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= -github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8= -github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4= +github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= +github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= diff --git a/simapp/go.mod b/simapp/go.mod index 4532b7e167..16c5b774d1 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -130,7 +130,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/simapp/go.sum b/simapp/go.sum index 5d7941a4ee..e2298cb7a2 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -749,8 +749,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/simapp/gomod2nix.toml b/simapp/gomod2nix.toml index 6d137d11b4..0062769c20 100644 --- a/simapp/gomod2nix.toml +++ b/simapp/gomod2nix.toml @@ -327,8 +327,8 @@ schema = 3 version = "v0.1.0" hash = "sha256-wQqGTtRWsfR9n0O/SXHVgECebbnNmHddxJIbG63OJBQ=" [mod."github.com/linxGnu/grocksdb"] - version = "v1.8.0" - hash = "sha256-o6zj18at4oN6pqSioFqd1AXduR/cx0xDgtL1rPPw+1M=" + version = "v1.8.4" + hash = "sha256-AGQ0YEuwUaev/mxOkiTNgAOLB9c7reHyTmNxXuaRah8=" [mod."github.com/magiconair/properties"] version = "v1.8.7" hash = "sha256-XQ2bnc2s7/IH3WxEO4GishZurMyKwEclZy1DXg+2xXc=" diff --git a/store/batch.go b/store/batch.go new file mode 100644 index 0000000000..aa09237cbc --- /dev/null +++ b/store/batch.go @@ -0,0 +1,17 @@ +package store + +// Batch is a write-only database that commits changes to the underlying database +// when Write is called. A batch cannot be used concurrently. 
+type Batch interface { + Writer + + // Size retrieves the amount of data queued up for writing, this includes + // the keys, values, and deleted keys. + Size() int + + // Write flushes any accumulated data to disk. + Write() error + + // Reset resets the batch. + Reset() +} diff --git a/store/branchkv/README.md b/store/branchkv/README.md new file mode 100644 index 0000000000..3732cfc850 --- /dev/null +++ b/store/branchkv/README.md @@ -0,0 +1,10 @@ +# branchkv + +The `branchkv.Store` implementation defines a `BranchedKVStore` that contains a +reference to a `VersionedDatabase`, i.e. an SS backend. The `branchkv.Store` is +meant to be used as the primary store used in a `RootStore` implementation. It +provides the ability to get the current `ChangeSet`, branching, and writing to +a parent store (if one is defined). Note, all reads first pass through the +staged, i.e. dirty writes. If a key is not found in the staged writes, the read +is then passed to the parent store (if one is defined), finally falling back to +the backing SS engine. diff --git a/store/branchkv/iterator.go b/store/branchkv/iterator.go new file mode 100644 index 0000000000..77705f014e --- /dev/null +++ b/store/branchkv/iterator.go @@ -0,0 +1,141 @@ +package branchkv + +import ( + "slices" + + "cosmossdk.io/store/v2" +) + +var _ store.Iterator = (*iterator)(nil) + +// iterator walks over both the KVStore's changeset, i.e. dirty writes, and the +// parent iterator, which can either be another KVStore or the SS backend, at the +// same time. +// +// Note, writes that happen on the KVStore over an iterator will not affect the +// iterator. This is because when an iterator is created, it takes a current +// snapshot of the changeset. +type iterator struct { + parentItr store.Iterator + start []byte + end []byte + key []byte + value []byte + keys []string + values []store.KVPair + reverse bool + exhausted bool // exhausted reflects if the parent iterator is exhausted or not +} + +// Domain returns the domain of the iterator. The caller must not modify the +// return values. 
+func (itr *iterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr *iterator) Key() []byte { + return slices.Clone(itr.key) +} + +func (itr *iterator) Value() []byte { + return slices.Clone(itr.value) +} + +func (itr *iterator) Close() { + itr.key = nil + itr.value = nil + itr.keys = nil + itr.values = nil + itr.parentItr.Close() +} + +func (itr *iterator) Next() bool { + for { + switch { + case itr.exhausted && len(itr.keys) == 0: // exhausted both + itr.key = nil + itr.value = nil + return false + + case itr.exhausted: // exhausted parent iterator but not store (dirty writes) iterator + nextKey := itr.keys[0] + nextValue := itr.values[0] + + // pop off the key + itr.keys[0] = "" + itr.keys = itr.keys[1:] + + // pop off the value + itr.values[0].Value = nil + itr.values = itr.values[1:] + + if nextValue.Value != nil { + itr.key = []byte(nextKey) + itr.value = nextValue.Value + return true + } + + case len(itr.keys) == 0: // exhausted store (dirty writes) iterator but not parent iterator + itr.key = itr.parentItr.Key() + itr.value = itr.parentItr.Value() + itr.exhausted = !itr.parentItr.Next() + + return true + + default: // parent iterator is not exhausted and we have store (dirty writes) remaining + dirtyKey := itr.keys[0] + dirtyVal := itr.values[0] + + parentKey := itr.parentItr.Key() + parentKeyStr := string(parentKey) + + switch { + case (!itr.reverse && dirtyKey < parentKeyStr) || (itr.reverse && dirtyKey > parentKeyStr): // dirty key should come before parent's key + // pop off key + itr.keys[0] = "" + itr.keys = itr.keys[1:] + + // pop off value + itr.values[0].Value = nil + itr.values = itr.values[1:] + + if dirtyVal.Value != nil { + itr.key = []byte(dirtyKey) + itr.value = dirtyVal.Value + return true + } + + case (!itr.reverse && parentKeyStr < dirtyKey) || (itr.reverse && parentKeyStr > dirtyKey): // parent's key should come before dirty key + itr.key = parentKey + itr.value = itr.parentItr.Value() + itr.exhausted = !itr.parentItr.Next() + return true + + default: + // pop off key + itr.keys[0] = "" + itr.keys = itr.keys[1:] + + // pop off value + itr.values[0].Value = nil + itr.values = itr.values[1:] + + itr.exhausted = !itr.parentItr.Next() + + if dirtyVal.Value != nil { + itr.key = []byte(dirtyKey) + itr.value = dirtyVal.Value + return true + } + } + } + } +} + +func (itr *iterator) Valid() bool { + return itr.key != nil && itr.value != nil +} + +func (itr *iterator) Error() error { + return itr.parentItr.Error() +} diff --git a/store/branchkv/store.go b/store/branchkv/store.go new file mode 100644 index 0000000000..0d75cdc08b --- /dev/null +++ b/store/branchkv/store.go @@ -0,0 +1,312 @@ +package branchkv + +import ( + "io" + "slices" + "sync" + + "golang.org/x/exp/maps" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/tracekv" +) + +var _ store.BranchedKVStore = (*Store)(nil) + +// Store implements both a KVStore and BranchedKVStore interfaces. It is used to +// accumulate writes that can be later committed to backing SS and SC engines or +// discarded altogether. If a read is not found through an uncommitted write, it +// will be delegated to the SS backend. 
+type Store struct { + mu sync.Mutex + + // storage reflects backing storage (SS) for reads that are not found in uncommitted volatile state + storage store.VersionedDatabase + + // version indicates the latest version to handle reads falling through to SS + version uint64 + + // storeKey reflects the store key used for the store + storeKey string + + // parent reflects a parent store if branched (it may be nil) + parent store.KVStore + + // changeset reflects the uncommitted writes to the store + changeset map[string]store.KVPair +} + +func New(storeKey string, ss store.VersionedDatabase) (store.BranchedKVStore, error) { + latestVersion, err := ss.GetLatestVersion() + if err != nil { + return nil, err + } + + return &Store{ + storage: ss, + storeKey: storeKey, + version: latestVersion, + changeset: make(map[string]store.KVPair), + }, nil +} + +func NewWithParent(parent store.KVStore) store.BranchedKVStore { + return &Store{ + parent: parent, + storeKey: parent.GetStoreKey(), + changeset: make(map[string]store.KVPair), + } +} + +func (s *Store) GetStoreKey() string { + return s.storeKey +} + +func (s *Store) GetStoreType() store.StoreType { + return store.StoreTypeBranch +} + +// GetChangeset returns the uncommitted writes to the store, ordered by key. +func (s *Store) GetChangeset() *store.Changeset { + keys := maps.Keys(s.changeset) + slices.Sort(keys) + + pairs := make([]store.KVPair, len(keys)) + for i, key := range keys { + kvPair := s.changeset[key] + pairs[i] = store.KVPair{ + Key: []byte(key), + Value: slices.Clone(kvPair.Value), + StoreKey: kvPair.StoreKey, + } + } + + return store.NewChangeset(pairs...) +} + +func (s *Store) Reset() error { + s.mu.Lock() + defer s.mu.Unlock() + + latestVersion, err := s.storage.GetLatestVersion() + if err != nil { + return err + } + + clear(s.changeset) + s.version = latestVersion + + return nil +} + +func (s *Store) Branch() store.BranchedKVStore { + return NewWithParent(s) +} + +func (s *Store) BranchWithTrace(w io.Writer, tc store.TraceContext) store.BranchedKVStore { + return NewWithParent(tracekv.New(s, w, tc)) +} + +func (s *Store) Has(key []byte) bool { + store.AssertValidKey(key) + + s.mu.Lock() + defer s.mu.Unlock() + + // if the write is present in the changeset, i.e. a dirty write, evaluate it + if kvPair, ok := s.changeset[string(key)]; ok { + // a non-nil value indicates presence + return kvPair.Value != nil + } + + // if the store is branched, check the parent store + if s.parent != nil { + return s.parent.Has(key) + } + + // otherwise, we fallback to SS + ok, err := s.storage.Has(s.storeKey, s.version, key) + if err != nil { + panic(err) + } + + return ok +} + +func (s *Store) Get(key []byte) []byte { + store.AssertValidKey(key) + + s.mu.Lock() + defer s.mu.Unlock() + + // if the write is present in the changeset, i.e. 
a dirty write, evaluate it + if kvPair, ok := s.changeset[string(key)]; ok { + if kvPair.Value == nil { + return nil + } + + return slices.Clone(kvPair.Value) + } + + // if the store is branched, check the parent store + if s.parent != nil { + return s.parent.Get(key) + } + + // otherwise, we fallback to SS + bz, err := s.storage.Get(s.storeKey, s.version, key) + if err != nil { + panic(err) + } + + return bz +} + +func (s *Store) Set(key, value []byte) { + store.AssertValidKey(key) + store.AssertValidValue(value) + + s.mu.Lock() + defer s.mu.Unlock() + + // omit the key as that can be inferred from the map key + s.changeset[string(key)] = store.KVPair{Value: slices.Clone(value), StoreKey: s.storeKey} +} + +func (s *Store) Delete(key []byte) { + store.AssertValidKey(key) + + s.mu.Lock() + defer s.mu.Unlock() + + // omit the key as that can be inferred from the map key + s.changeset[string(key)] = store.KVPair{Value: nil, StoreKey: s.storeKey} +} + +func (s *Store) Write() { + s.mu.Lock() + defer s.mu.Unlock() + + // Note, we're only flushing the writes up to the parent, if it exists. We are + // not writing to the SS backend as that will happen in Commit(). + if s.parent != nil { + keys := maps.Keys(s.changeset) + slices.Sort(keys) + + // flush changes upstream to the parent in sorted order by key + for _, key := range keys { + kvPair := s.changeset[key] + + if kvPair.Value == nil { + s.parent.Delete([]byte(key)) + } else { + s.parent.Set([]byte(key), kvPair.Value) + } + } + } +} + +// Iterator creates an iterator over the domain [start, end), which walks over +// both the KVStore's changeset, i.e. dirty writes, and the parent iterator, +// which can either be another KVStore or the SS backend, at the same time. +// +// Note, writes that happen on the KVStore over an iterator will not affect the +// iterator. This is because when an iterator is created, it takes a current +// snapshot of the changeset. +func (s *Store) Iterator(start, end []byte) store.Iterator { + s.mu.Lock() + defer s.mu.Unlock() + + var parentItr store.Iterator + if s.parent != nil { + parentItr = s.parent.Iterator(start, end) + } else { + var err error + parentItr, err = s.storage.Iterator(s.storeKey, s.version, start, end) + if err != nil { + panic(err) + } + } + + return s.newIterator(parentItr, start, end, false) +} + +// ReverseIterator creates a reverse iterator over the domain [start, end), which +// walks over both the KVStore's changeset, i.e. dirty writes, and the parent +// iterator, which can either be another KVStore or the SS backend, at the same +// time. +// +// Note, writes that happen on the KVStore over an iterator will not affect the +// iterator. This is because when an iterator is created, it takes a current +// snapshot of the changeset. 
+func (s *Store) ReverseIterator(start, end []byte) store.Iterator { + s.mu.Lock() + defer s.mu.Unlock() + + var parentItr store.Iterator + if s.parent != nil { + parentItr = s.parent.ReverseIterator(start, end) + } else { + var err error + parentItr, err = s.storage.ReverseIterator(s.storeKey, s.version, start, end) + if err != nil { + panic(err) + } + } + + return s.newIterator(parentItr, start, end, true) +} + +func (s *Store) newIterator(parentItr store.Iterator, start, end []byte, reverse bool) *iterator { + startStr := string(start) + endStr := string(end) + + keys := make([]string, 0, len(s.changeset)) + for key := range s.changeset { + switch { + case start != nil && end != nil: + if key >= startStr && key < endStr { + keys = append(keys, key) + } + + case start != nil: + if key >= startStr { + keys = append(keys, key) + } + + case end != nil: + if key < endStr { + keys = append(keys, key) + } + + default: + keys = append(keys, key) + } + } + + slices.Sort(keys) + + if reverse { + slices.Reverse(keys) + } + + values := make([]store.KVPair, len(keys)) + for i, key := range keys { + values[i] = s.changeset[key] + } + + itr := &iterator{ + parentItr: parentItr, + start: start, + end: end, + keys: keys, + values: values, + reverse: reverse, + exhausted: !parentItr.Valid(), + } + + // call Next() to move the iterator to the first key/value entry + _ = itr.Next() + + return itr +} diff --git a/store/branchkv/store_test.go b/store/branchkv/store_test.go new file mode 100644 index 0000000000..d1cfacfa45 --- /dev/null +++ b/store/branchkv/store_test.go @@ -0,0 +1,562 @@ +package branchkv_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/branchkv" + "cosmossdk.io/store/v2/storage/sqlite" +) + +const storeKey = "storeKey" + +type StoreTestSuite struct { + suite.Suite + + storage store.VersionedDatabase + kvStore store.BranchedKVStore +} + +func TestStorageTestSuite(t *testing.T) { + suite.Run(t, &StoreTestSuite{}) +} + +func (s *StoreTestSuite) SetupTest() { + storage, err := sqlite.New(s.T().TempDir()) + s.Require().NoError(err) + + cs := new(store.Changeset) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + cs.AddKVPair(store.KVPair{StoreKey: storeKey, Key: []byte(key), Value: []byte(val)}) + } + + s.Require().NoError(storage.ApplyChangeset(1, cs)) + + kvStore, err := branchkv.New(storeKey, storage) + s.Require().NoError(err) + + s.storage = storage + s.kvStore = kvStore +} + +func (s *StoreTestSuite) TestGetStoreType() { + s.Require().Equal(store.StoreTypeBranch, s.kvStore.GetStoreType()) +} + +func (s *StoreTestSuite) TestGetChangeset() { + // initial store with no writes should have an empty changeset + cs := s.kvStore.GetChangeset() + s.Require().Zero(cs.Size()) + + // perform some writes + s.kvStore.Set([]byte("key000"), []byte("updated_val000")) + s.kvStore.Delete([]byte("key001")) + + cs = s.kvStore.GetChangeset() + s.Require().Equal(cs.Size(), 2) +} + +func (s *StoreTestSuite) TestReset() { + s.Require().NoError(s.kvStore.Reset()) + + cs := s.kvStore.GetChangeset() + s.Require().Zero(cs.Size()) +} + +func (s *StoreTestSuite) TestGet() { + // perform read of key000, which is not dirty + bz := s.kvStore.Get([]byte("key000")) + s.Require().Equal([]byte("val000"), bz) + + // update key000 and perform a read which should reflect the new value + s.kvStore.Set([]byte("key000"), 
[]byte("updated_val000")) + + bz = s.kvStore.Get([]byte("key000")) + s.Require().Equal([]byte("updated_val000"), bz) + + // ensure the primary SS backend is not modified + bz, err := s.storage.Get(storeKey, 1, []byte("key000")) + s.Require().NoError(err) + s.Require().Equal([]byte("val000"), bz) +} + +func (s *StoreTestSuite) TestHas() { + // perform read of key000, which is not dirty thus falling back to SS + ok := s.kvStore.Has([]byte("key000")) + s.Require().True(ok) + + ok = s.kvStore.Has([]byte("key100")) + s.Require().False(ok) + + // perform a write of a brand new key not in SS, but in the changeset + s.kvStore.Set([]byte("key100"), []byte("val100")) + + ok = s.kvStore.Has([]byte("key100")) + s.Require().True(ok) +} + +func (s *StoreTestSuite) TestBranch() { + // perform a few writes on the original store + s.kvStore.Set([]byte("key000"), []byte("updated_val000")) + s.kvStore.Set([]byte("key001"), []byte("updated_val001")) + + // create a new branch + b := s.kvStore.Branch() + + // update an existing dirty write + b.Set([]byte("key001"), []byte("branched_updated_val001")) + + // perform reads on the branched store without writing first + + // key000 is dirty in the original store, but not in the branched store + s.Require().Equal([]byte("updated_val000"), b.Get([]byte("key000"))) + + // key001 is dirty in both the original and branched store, but branched store + // should reflect the branched write. + s.Require().Equal([]byte("branched_updated_val001"), b.Get([]byte("key001"))) + + // key002 is not dirty in either store, so should fall back to SS + s.Require().Equal([]byte("val002"), b.Get([]byte("key002"))) + + // ensure the original store is not modified + s.Require().Equal([]byte("updated_val001"), s.kvStore.Get([]byte("key001"))) + + s.Require().Equal(1, b.GetChangeset().Size()) + s.Require().Equal([]byte("key001"), b.GetChangeset().Pairs[0].Key) + + // write the branched store and ensure all writes are flushed to the parent + b.Write() + s.Require().Equal([]byte("branched_updated_val001"), s.kvStore.Get([]byte("key001"))) + + s.Require().Equal(2, s.kvStore.GetChangeset().Size()) +} + +func (s *StoreTestSuite) TestIterator_NoWrites() { + // iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.Iterator([]byte("key000"), nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.Iterator(nil, []byte("key100")) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with with a start and end domain + s.Run("start_and_end", func() { + itr := s.kvStore.Iterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + var i, count int 
+ for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(50, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.Iterator(nil, nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} + +func (s *StoreTestSuite) TestIterator_DirtyWrites() { + // modify all even keys + for i := 0; i < 100; i++ { + if i%2 == 0 { + key := fmt.Sprintf("key%03d", i) // key000, key002, ... + val := fmt.Sprintf("updated_val%03d", i) // updated_val000, updated_val002, ... + s.kvStore.Set([]byte(key), []byte(val)) + } + } + + // add some new keys to ensure we cover those as well + for i := 100; i < 150; i++ { + key := fmt.Sprintf("key%03d", i) // key100, key101, ... + val := fmt.Sprintf("val%03d", i) // val100, val101, ... + s.kvStore.Set([]byte(key), []byte(val)) + } + + // iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.Iterator([]byte("key000"), nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i++ + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.Iterator(nil, []byte("key150")) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i++ + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with with a start and end domain + s.Run("start_and_end", func() { + itr := s.kvStore.Iterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i++ + count++ + } + s.Require().Equal(50, count) + 
s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.Iterator(nil, nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i++ + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} + +func (s *StoreTestSuite) TestReverseIterator_NoWrites() { + // reverse iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), nil) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // reverse iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.ReverseIterator(nil, []byte("key100")) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // reverse iterator with with a start and end domain + s.Run("start_and_end", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + i := 49 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(50, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // reverse iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.ReverseIterator(nil, nil) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} + +func (s *StoreTestSuite) TestReverseIterator_DirtyWrites() { + // modify all even keys + for i := 0; i < 100; i++ { + if i%2 == 0 { + 
key := fmt.Sprintf("key%03d", i) // key000, key002, ... + val := fmt.Sprintf("updated_val%03d", i) // updated_val000, updated_val002, ... + s.kvStore.Set([]byte(key), []byte(val)) + } + } + + // add some new keys to ensure we cover those as well + for i := 100; i < 150; i++ { + key := fmt.Sprintf("key%03d", i) // key100, key101, ... + val := fmt.Sprintf("val%03d", i) // val100, val101, ... + s.kvStore.Set([]byte(key), []byte(val)) + } + + // reverse iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), nil) + defer itr.Close() + + i := 149 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), "itr_key: %s, count: %d", string(itr.Key()), count) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i-- + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.ReverseIterator(nil, []byte("key150")) + defer itr.Close() + + i := 149 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i-- + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with with a start and end domain + s.Run("start_and_end", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + i := 49 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i-- + count++ + } + s.Require().Equal(50, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.ReverseIterator(nil, nil) + defer itr.Close() + + i := 149 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 && i < 100 { + s.Require().Equal([]byte(fmt.Sprintf("updated_val%03d", i)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + } + + i-- + count++ + } + s.Require().Equal(150, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} diff --git a/store/cache/benchmark_test.go b/store/cache/benchmark_test.go deleted file mode 100644 index 76f875a0d1..0000000000 --- 
a/store/cache/benchmark_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package cache - -import ( - "testing" - - "cosmossdk.io/store/types" -) - -func freshMgr() *CommitKVStoreCacheManager { - return &CommitKVStoreCacheManager{ - caches: map[string]types.CommitKVStore{ - "a1": nil, - "alalalalalal": nil, - }, - } -} - -func populate(mgr *CommitKVStoreCacheManager) { - mgr.caches["this one"] = (types.CommitKVStore)(nil) - mgr.caches["those ones are the ones"] = (types.CommitKVStore)(nil) - mgr.caches["very huge key right here and there are we going to ones are the ones"] = (types.CommitKVStore)(nil) -} - -func BenchmarkReset(b *testing.B) { - b.ReportAllocs() - mgr := freshMgr() - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - mgr.Reset() - if len(mgr.caches) != 0 { - b.Fatal("Reset failed") - } - populate(mgr) - if len(mgr.caches) == 0 { - b.Fatal("populate failed") - } - mgr.Reset() - if len(mgr.caches) != 0 { - b.Fatal("Reset failed") - } - } - - if mgr == nil { - b.Fatal("Impossible condition") - } -} diff --git a/store/cache/cache.go b/store/cache/cache.go deleted file mode 100644 index 98d17d0341..0000000000 --- a/store/cache/cache.go +++ /dev/null @@ -1,132 +0,0 @@ -package cache - -import ( - "fmt" - - lru "github.com/hashicorp/golang-lru" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/types" -) - -var ( - _ types.CommitKVStore = (*CommitKVStoreCache)(nil) - _ types.MultiStorePersistentCache = (*CommitKVStoreCacheManager)(nil) - - // DefaultCommitKVStoreCacheSize defines the persistent ARC cache size for a - // CommitKVStoreCache. - DefaultCommitKVStoreCacheSize uint = 1000 -) - -type ( - // CommitKVStoreCache implements an inter-block (persistent) cache that wraps a - // CommitKVStore. Reads first hit the internal ARC (Adaptive Replacement Cache). - // During a cache miss, the read is delegated to the underlying CommitKVStore - // and cached. Deletes and writes always happen to both the cache and the - // CommitKVStore in a write-through manner. Caching performed in the - // CommitKVStore and below is completely irrelevant to this layer. - CommitKVStoreCache struct { - types.CommitKVStore - cache *lru.ARCCache - } - - // CommitKVStoreCacheManager maintains a mapping from a StoreKey to a - // CommitKVStoreCache. Each CommitKVStore, per StoreKey, is meant to be used - // in an inter-block (persistent) manner and typically provided by a - // CommitMultiStore. - CommitKVStoreCacheManager struct { - cacheSize uint - caches map[string]types.CommitKVStore - } -) - -func NewCommitKVStoreCache(store types.CommitKVStore, size uint) *CommitKVStoreCache { - cache, err := lru.NewARC(int(size)) - if err != nil { - panic(fmt.Errorf("failed to create KVStore cache: %s", err)) - } - - return &CommitKVStoreCache{ - CommitKVStore: store, - cache: cache, - } -} - -func NewCommitKVStoreCacheManager(size uint) *CommitKVStoreCacheManager { - return &CommitKVStoreCacheManager{ - cacheSize: size, - caches: make(map[string]types.CommitKVStore), - } -} - -// GetStoreCache returns a Cache from the CommitStoreCacheManager for a given -// StoreKey. If no Cache exists for the StoreKey, then one is created and set. -// The returned Cache is meant to be used in a persistent manner. 
-func (cmgr *CommitKVStoreCacheManager) GetStoreCache(key types.StoreKey, store types.CommitKVStore) types.CommitKVStore { - if cmgr.caches[key.Name()] == nil { - cmgr.caches[key.Name()] = NewCommitKVStoreCache(store, cmgr.cacheSize) - } - - return cmgr.caches[key.Name()] -} - -// Unwrap returns the underlying CommitKVStore for a given StoreKey. -func (cmgr *CommitKVStoreCacheManager) Unwrap(key types.StoreKey) types.CommitKVStore { - if ckv, ok := cmgr.caches[key.Name()]; ok { - return ckv.(*CommitKVStoreCache).CommitKVStore - } - - return nil -} - -// Reset resets in the internal caches. -func (cmgr *CommitKVStoreCacheManager) Reset() { - // Clear the map. - // Please note that we are purposefully using the map clearing idiom. - // See https://github.com/cosmos/cosmos-sdk/issues/6681. - for key := range cmgr.caches { - delete(cmgr.caches, key) - } -} - -// CacheWrap implements the CacheWrapper interface -func (ckv *CommitKVStoreCache) CacheWrap() types.CacheWrap { - return cachekv.NewStore(ckv) -} - -// Get retrieves a value by key. It will first look in the write-through cache. -// If the value doesn't exist in the write-through cache, the query is delegated -// to the underlying CommitKVStore. -func (ckv *CommitKVStoreCache) Get(key []byte) []byte { - types.AssertValidKey(key) - - keyStr := string(key) - valueI, ok := ckv.cache.Get(keyStr) - if ok { - // cache hit - return valueI.([]byte) - } - - // cache miss; write to cache - value := ckv.CommitKVStore.Get(key) - ckv.cache.Add(keyStr, value) - - return value -} - -// Set inserts a key/value pair into both the write-through cache and the -// underlying CommitKVStore. -func (ckv *CommitKVStoreCache) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - - ckv.cache.Add(string(key), value) - ckv.CommitKVStore.Set(key, value) -} - -// Delete removes a key/value pair from both the write-through cache and the -// underlying CommitKVStore. 
-func (ckv *CommitKVStoreCache) Delete(key []byte) { - ckv.cache.Remove(string(key)) - ckv.CommitKVStore.Delete(key) -} diff --git a/store/cache/cache_test.go b/store/cache/cache_test.go deleted file mode 100644 index 0340f7ecbd..0000000000 --- a/store/cache/cache_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package cache_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cache" - "cosmossdk.io/store/cachekv" - iavlstore "cosmossdk.io/store/iavl" - "cosmossdk.io/store/types" -) - -func TestGetOrSetStoreCache(t *testing.T) { - db := dbm.NewMemDB() - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - store2 := mngr.GetStoreCache(sKey, store) - - require.NotNil(t, store2) - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) -} - -func TestUnwrap(t *testing.T) { - db := dbm.NewMemDB() - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - _ = mngr.GetStoreCache(sKey, store) - - require.Equal(t, store, mngr.Unwrap(sKey)) - require.Nil(t, mngr.Unwrap(types.NewKVStoreKey("test2"))) -} - -func TestStoreCache(t *testing.T) { - db := dbm.NewMemDB() - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - kvStore := mngr.GetStoreCache(sKey, store) - - for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ { - key := []byte(fmt.Sprintf("key_%d", i)) - value := []byte(fmt.Sprintf("value_%d", i)) - - kvStore.Set(key, value) - - res := kvStore.Get(key) - require.Equal(t, res, value) - require.Equal(t, res, store.Get(key)) - - kvStore.Delete(key) - - require.Nil(t, kvStore.Get(key)) - require.Nil(t, store.Get(key)) - } -} - -func TestReset(t *testing.T) { - db := dbm.NewMemDB() - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - store2 := mngr.GetStoreCache(sKey, store) - - require.NotNil(t, store2) - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) - - // reset and check if the cache is gone - mngr.Reset() - require.Nil(t, mngr.Unwrap(sKey)) - - // check if the cache is recreated - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) -} - -func TestCacheWrap(t *testing.T) { - db := dbm.NewMemDB() - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - - cacheWrapper := mngr.GetStoreCache(sKey, store).CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) -} diff --git a/store/cachekv/README.md b/store/cachekv/README.md deleted file mode 100644 index 66f0916dea..0000000000 --- a/store/cachekv/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# CacheKVStore specification - -A `CacheKVStore` is cache wrapper for a `KVStore`. 
It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposing of changes (e.g. after processing a failed transaction). - -The core goals the CacheKVStore seeks to solve are: - -* Buffer all writes to the parent store, so they can be dropped if they need to be reverted -* Allow iteration over contiguous spans of keys -* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O) - * Note: We actually fail to achieve this for iteration right now - * Note: Need to consider this getting too large and dropping some cached reads -* Make subsequent reads account for prior buffered writes -* Write all buffered changes to the parent store - -We should revisit these goals with time (for instance it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status. - -## Types and Structs - -```go -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - deleted map[string]struct{} - unsortedCache map[string]struct{} - sortedCache *dbm.MemDB // always ascending sorted - parent types.KVStore -} -``` - -The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. Mutex is used as IAVL trees (the `KVStore` in application) are not safe for concurrent use. - -### `cache` - -The main mapping of key-value pairs stored in cache. This map contains both keys that are cached from read operations as well as ‘dirty’ keys which map to a value that is potentially different than what is in the underlying `KVStore`. - -Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`. - -```go -type cValue struct { - value []byte - dirty bool -} -``` - -### `deleted` - -Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set. - -### `unsortedCache` - -Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set. - -### `sortedCache` - -A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order. - -## CRUD Operations and Writing - -The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it). - -`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted. - -### `Get` - -`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`. - -### `Has` - -`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read. - -### `Set` - -New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`. 
- -Calls `setCacheValue()` with `deleted=false` and `dirty=true`. - -### `Delete` - -A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`. - -Calls `setCacheValue()` with `deleted=true` and `dirty=true`. - -### `Write` - -Key-value pairs in the cache are written to `parent` in ascending order of their keys. - -A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`. - -If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache. - -## Iteration - -Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache. - -In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching). - -[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache. - -### Implementation Overview - -Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator. - -Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested. - -Generating the cache iterator can be decomposed into four parts: - -1. Finding all keys that exist in the range we are iterating over -2. Sorting this list of keys -3. Inserting these keys into `sortedCache` and removing them from `unsortedCache` -4. Returning an iterator over `sortedCache` with the desired range - -Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows. - -If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys. - -```go -n := len(store.unsortedCache) -unsorted := make([]*kv.Pair, 0) - -if n < minSortSize { - for key := range store.unsortedCache { - if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { - cacheValue := store.cache[key] - unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - } - store.clearUnsortedCacheSubset(unsorted, stateUnsorted) - return -} -``` - -Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`. - -At this point, part 3. is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. 
Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`). - -In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`sortedState`) is used to skip the sorting step in the function. - -Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`. - -As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls. \ No newline at end of file diff --git a/store/cachekv/bench_helper_test.go b/store/cachekv/bench_helper_test.go deleted file mode 100644 index be7fec4b3a..0000000000 --- a/store/cachekv/bench_helper_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv_test - -import "crypto/rand" - -func randSlice(sliceSize int) []byte { - bz := make([]byte, sliceSize) - _, _ = rand.Read(bz) - return bz -} - -func incrementByteSlice(bz []byte) { - for index := len(bz) - 1; index >= 0; index-- { - if bz[index] < 255 { - bz[index]++ - break - } else { - bz[index] = 0 - } - } -} - -// Generate many keys starting at startKey, and are in sequential order -func generateSequentialKeys(startKey []byte, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - cur := make([]byte, len(startKey)) - copy(cur, startKey) - for i := 0; i < numKeys; i++ { - newKey := make([]byte, len(startKey)) - copy(newKey, cur) - toReturn = append(toReturn, newKey) - incrementByteSlice(cur) - } - return toReturn -} - -// Generate many random, unsorted keys -func generateRandomKeys(keySize, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - for i := 0; i < numKeys; i++ { - newKey := randSlice(keySize) - toReturn = append(toReturn, newKey) - } - return toReturn -} diff --git a/store/cachekv/benchmark_test.go b/store/cachekv/benchmark_test.go deleted file mode 100644 index 158549b4bd..0000000000 --- a/store/cachekv/benchmark_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package cachekv_test - -import ( - fmt "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/types" -) - -func DoBenchmarkDeepCacheStack(b *testing.B, depth int) { - b.Helper() - db := dbm.NewMemDB() - initialStore := cachekv.NewStore(dbadapter.Store{DB: db}) - - nItems := 20 - for i := 0; i < nItems; i++ { - initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0}) - } - - var stack CacheStack - stack.Reset(initialStore) - - for i := 0; i < depth; i++ { - stack.Snapshot() - - store := stack.CurrentStore() - store.Set([]byte(fmt.Sprintf("hello%03d", i)), 
[]byte{byte(i)}) - } - - store := stack.CurrentStore() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - it := store.Iterator(nil, nil) - items := make([][]byte, 0, nItems) - for ; it.Valid(); it.Next() { - items = append(items, it.Key()) - it.Value() - } - it.Close() - require.Equal(b, nItems, len(items)) - } -} - -func BenchmarkDeepCacheStack1(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 1) -} - -func BenchmarkDeepCacheStack3(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 3) -} - -func BenchmarkDeepCacheStack10(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 10) -} - -func BenchmarkDeepCacheStack13(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 13) -} - -// CacheStack manages a stack of nested cache store to -// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods. -type CacheStack struct { - initialStore types.CacheKVStore - // Context of the initial state before transaction execution. - // It's the context used by `StateDB.CommitedState`. - cacheStores []types.CacheKVStore -} - -// CurrentContext returns the top context of cached stack, -// if the stack is empty, returns the initial context. -func (cs *CacheStack) CurrentStore() types.CacheKVStore { - l := len(cs.cacheStores) - if l == 0 { - return cs.initialStore - } - return cs.cacheStores[l-1] -} - -// Reset sets the initial context and clear the cache context stack. -func (cs *CacheStack) Reset(initialStore types.CacheKVStore) { - cs.initialStore = initialStore - cs.cacheStores = nil -} - -// IsEmpty returns true if the cache context stack is empty. -func (cs *CacheStack) IsEmpty() bool { - return len(cs.cacheStores) == 0 -} - -// Commit commits all the cached contexts from top to bottom in order and clears the stack by setting an empty slice of cache contexts. -func (cs *CacheStack) Commit() { - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i >= 0; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = nil -} - -// CommitToRevision commit the cache after the target revision, -// to improve efficiency of db operations. -func (cs *CacheStack) CommitToRevision(target int) error { - if target < 0 || target >= len(cs.cacheStores) { - return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)) - } - - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i > target; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = cs.cacheStores[0 : target+1] - - return nil -} - -// Snapshot pushes a new cached context to the stack, -// and returns the index of it. -func (cs *CacheStack) Snapshot() int { - cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore())) - return len(cs.cacheStores) - 1 -} - -// RevertToSnapshot pops all the cached contexts after the target index (inclusive). -// the target should be snapshot index returned by `Snapshot`. -// This function panics if the index is out of bounds. 
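// For example, a hypothetical caller might use the pair of methods as follows:
//
//	idx := stack.Snapshot()                             // push a fresh cachekv layer
//	stack.CurrentStore().Set([]byte("k"), []byte("v"))  // buffered only in that layer
//	stack.RevertToSnapshot(idx)                         // discard the layer at idx and everything above it
//
// By contrast, Commit (or CommitToRevision) would Write the layers down into
// their parents instead of discarding them.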
-func (cs *CacheStack) RevertToSnapshot(target int) { - if target < 0 || target >= len(cs.cacheStores) { - panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))) - } - cs.cacheStores = cs.cacheStores[:target] -} diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go deleted file mode 100644 index 209f7e58c4..0000000000 --- a/store/cachekv/internal/btree.go +++ /dev/null @@ -1,91 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// BTree implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type BTree struct { - tree *btree.BTreeG[item] -} - -// NewBTree creates a wrapper around `btree.BTreeG`. -func NewBTree() BTree { - return BTree{ - tree: btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: false, - }), - } -} - -func (bt BTree) Set(key, value []byte) { - bt.tree.Set(newItem(key, value)) -} - -func (bt BTree) Get(key []byte) []byte { - i, found := bt.tree.Get(newItem(key, nil)) - if !found { - return nil - } - return i.value -} - -func (bt BTree) Delete(key []byte) { - bt.tree.Delete(newItem(key, nil)) -} - -func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, true), nil -} - -func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, false), nil -} - -// Copy the tree. This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (bt BTree) Copy() BTree { - return BTree{ - tree: bt.tree.Copy(), - } -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key, value []byte) item { - return item{key: key, value: value} -} diff --git a/store/cachekv/internal/btree_test.go b/store/cachekv/internal/btree_test.go deleted file mode 100644 index 06437997f6..0000000000 --- a/store/cachekv/internal/btree_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestGetSetDelete(t *testing.T) { - db := NewBTree() - - // A nonexistent key should return nil. - value := db.Get([]byte("a")) - require.Nil(t, value) - - // Set and get a value. - db.Set([]byte("a"), []byte{0x01}) - db.Set([]byte("b"), []byte{0x02}) - value = db.Get([]byte("a")) - require.Equal(t, []byte{0x01}, value) - - value = db.Get([]byte("b")) - require.Equal(t, []byte{0x02}, value) - - // Deleting a non-existent value is fine. - db.Delete([]byte("x")) - - // Delete a value. 
- db.Delete([]byte("a")) - - value = db.Get([]byte("a")) - require.Nil(t, value) - - db.Delete([]byte("b")) - - value = db.Get([]byte("b")) - require.Nil(t, value) -} - -func TestDBIterator(t *testing.T) { - db := NewBTree() - - for i := 0; i < 10; i++ { - if i != 6 { // but skip 6. - db.Set(int642Bytes(int64(i)), []byte{}) - } - } - - // Blank iterator keys should error - _, err := db.ReverseIterator([]byte{}, nil) - require.Equal(t, errKeyEmpty, err) - _, err = db.ReverseIterator(nil, []byte{}) - require.Equal(t, errKeyEmpty, err) - - itr, err := db.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") - - ritr, err := db.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") - - itr, err = db.Iterator(nil, int642Bytes(0)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator to 0") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") - - itr, err = db.Iterator(int642Bytes(0), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") - - itr, err = db.Iterator(int642Bytes(1), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") - - ritr, err = db.ReverseIterator(nil, int642Bytes(10)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") - - itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 6 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 7 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, 
"reverse iterator from 6 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 7 (ex) to 6") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") - - ritr, err = db.ReverseIterator(int642Bytes(6), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") - - ritr, err = db.ReverseIterator(int642Bytes(5), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") - - ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") - - ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{3, 2}, "reverse iterator from 4 (ex) to 2") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 2 (ex) to 4") - - // Ensure that the iterators don't panic with an empty database. - db2 := NewBTree() - - itr, err = db2.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, nil, "forward iterator with empty db") - - ritr, err = db2.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, nil, "reverse iterator with empty db") -} - -func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) { - t.Helper() - i := 0 - for itr.Valid() { - key := itr.Key() - require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i) - itr.Next() - i++ - } - require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter") - require.NoError(t, itr.Close()) -} - -func int642Bytes(i int64) []byte { - return types.Uint64ToBigEndian(uint64(i)) -} - -func bytes2Int64(buf []byte) int64 { - return int64(types.BigEndianToUint64(buf)) -} diff --git a/store/cachekv/internal/memiterator.go b/store/cachekv/internal/memiterator.go deleted file mode 100644 index 9dbba75870..0000000000 --- a/store/cachekv/internal/memiterator.go +++ /dev/null @@ -1,120 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -var _ types.Iterator = (*memIterator)(nil) - -// memIterator iterates over iterKVCache items. -// if value is nil, means it was deleted. -// Implements Iterator. 
-type memIterator struct { - iter btree.IterG[item] - - start []byte - end []byte - ascending bool - valid bool -} - -func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { - iter := items.tree.Iter() - var valid bool - if ascending { - if start != nil { - valid = iter.Seek(newItem(start, nil)) - } else { - valid = iter.First() - } - } else { - if end != nil { - valid = iter.Seek(newItem(end, nil)) - if !valid { - valid = iter.Last() - } else { - // end is exclusive - valid = iter.Prev() - } - } else { - valid = iter.Last() - } - } - - mi := &memIterator{ - iter: iter, - start: start, - end: end, - ascending: ascending, - valid: valid, - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } - - return mi -} - -func (mi *memIterator) Domain() (start, end []byte) { - return mi.start, mi.end -} - -func (mi *memIterator) Close() error { - mi.iter.Release() - return nil -} - -func (mi *memIterator) Error() error { - if !mi.Valid() { - return errors.New("invalid memIterator") - } - return nil -} - -func (mi *memIterator) Valid() bool { - return mi.valid -} - -func (mi *memIterator) Next() { - mi.assertValid() - - if mi.ascending { - mi.valid = mi.iter.Next() - } else { - mi.valid = mi.iter.Prev() - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } -} - -func (mi *memIterator) keyInRange(key []byte) bool { - if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { - return false - } - if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { - return false - } - return true -} - -func (mi *memIterator) Key() []byte { - return mi.iter.Item().key -} - -func (mi *memIterator) Value() []byte { - return mi.iter.Item().value -} - -func (mi *memIterator) assertValid() { - if err := mi.Error(); err != nil { - panic(err) - } -} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go deleted file mode 100644 index 58e9497b30..0000000000 --- a/store/cachekv/internal/mergeiterator.go +++ /dev/null @@ -1,235 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "cosmossdk.io/store/types" -) - -// cacheMergeIterator merges a parent Iterator and a cache Iterator. -// The cache iterator may return nil keys to signal that an item -// had been deleted (but not deleted in the parent). -// If the cache iterator has the same key as the parent, the -// cache shadows (overrides) the parent. -// -// TODO: Optimize by memoizing. -type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator - ascending bool - - valid bool -} - -var _ types.Iterator = (*cacheMergeIterator)(nil) - -func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator { - iter := &cacheMergeIterator{ - parent: parent, - cache: cache, - ascending: ascending, - } - - iter.valid = iter.skipUntilExistsOrInvalid() - return iter -} - -// Domain implements Iterator. -// Returns parent domain because cache and parent domains are the same. -func (iter *cacheMergeIterator) Domain() (start, end []byte) { - return iter.parent.Domain() -} - -// Valid implements Iterator. -func (iter *cacheMergeIterator) Valid() bool { - return iter.valid -} - -// Next implements Iterator -func (iter *cacheMergeIterator) Next() { - iter.assertValid() - - switch { - case !iter.parent.Valid(): - // If parent is invalid, get the next cache item. - iter.cache.Next() - case !iter.cache.Valid(): - // If cache is invalid, get the next parent item. - iter.parent.Next() - default: - // Both are valid. Compare keys. 
- keyP, keyC := iter.parent.Key(), iter.cache.Key() - switch iter.compare(keyP, keyC) { - case -1: // parent < cache - iter.parent.Next() - case 0: // parent == cache - iter.parent.Next() - iter.cache.Next() - case 1: // parent > cache - iter.cache.Next() - } - } - iter.valid = iter.skipUntilExistsOrInvalid() -} - -// Key implements Iterator -func (iter *cacheMergeIterator) Key() []byte { - iter.assertValid() - - // If parent is invalid, get the cache key. - if !iter.parent.Valid() { - return iter.cache.Key() - } - - // If cache is invalid, get the parent key. - if !iter.cache.Valid() { - return iter.parent.Key() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return keyP - case 0: // parent == cache - return keyP - case 1: // parent > cache - return keyC - default: - panic("invalid compare result") - } -} - -// Value implements Iterator -func (iter *cacheMergeIterator) Value() []byte { - iter.assertValid() - - // If parent is invalid, get the cache value. - if !iter.parent.Valid() { - return iter.cache.Value() - } - - // If cache is invalid, get the parent value. - if !iter.cache.Valid() { - return iter.parent.Value() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return iter.parent.Value() - case 0: // parent == cache - return iter.cache.Value() - case 1: // parent > cache - return iter.cache.Value() - default: - panic("invalid comparison result") - } -} - -// Close implements Iterator -func (iter *cacheMergeIterator) Close() error { - err1 := iter.cache.Close() - if err := iter.parent.Close(); err != nil { - return err - } - - return err1 -} - -// Error returns an error if the cacheMergeIterator is invalid defined by the -// Valid method. -func (iter *cacheMergeIterator) Error() error { - if !iter.Valid() { - return errors.New("invalid cacheMergeIterator") - } - - return nil -} - -// If not valid, panics. -// NOTE: May have side-effect of iterating over cache. -func (iter *cacheMergeIterator) assertValid() { - if err := iter.Error(); err != nil { - panic(err) - } -} - -// Like bytes.Compare but opposite if not ascending. -func (iter *cacheMergeIterator) compare(a, b []byte) int { - if iter.ascending { - return bytes.Compare(a, b) - } - - return bytes.Compare(a, b) * -1 -} - -// Skip all delete-items from the cache w/ `key < until`. After this function, -// current cache item is a non-delete-item, or `until <= key`. -// If the current cache item is not a delete item, does nothing. -// If `until` is nil, there is no limit, and cache may end up invalid. -// CONTRACT: cache is valid. -func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { - for iter.cache.Valid() && - iter.cache.Value() == nil && - (until == nil || iter.compare(iter.cache.Key(), until) < 0) { - iter.cache.Next() - } -} - -// Fast forwards cache (or parent+cache in case of deleted items) until current -// item exists, or until iterator becomes invalid. -// Returns whether the iterator is valid. -func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { - for { - // If parent is invalid, fast-forward cache. - if !iter.parent.Valid() { - iter.skipCacheDeletes(nil) - return iter.cache.Valid() - } - // Parent is valid. - - if !iter.cache.Valid() { - return true - } - // Parent is valid, cache is valid. - - // Compare parent and cache. 
- keyP := iter.parent.Key() - keyC := iter.cache.Key() - - switch iter.compare(keyP, keyC) { - case -1: // parent < cache. - return true - - case 0: // parent == cache. - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.parent.Next() - iter.cache.Next() - - continue - } - // Cache is not a delete. - - return true // cache exists. - case 1: // cache < parent - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.skipCacheDeletes(keyP) - continue - } - // Cache is not a delete. - - return true // cache exists. - } - } -} diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go deleted file mode 100644 index ecdc86a8e4..0000000000 --- a/store/cachekv/search_benchmark_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv - -import ( - "strconv" - "testing" - - "cosmossdk.io/store/cachekv/internal" -) - -func BenchmarkLargeUnsortedMisses(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - store := generateStore() - b.StartTimer() - - for k := 0; k < 10000; k++ { - // cache has A + Z values - // these are within range, but match nothing - store.dirtyItems([]byte("B1"), []byte("B2")) - } - } -} - -func generateStore() *Store { - cache := map[string]*cValue{} - unsorted := map[string]struct{}{} - for i := 0; i < 5000; i++ { - key := "A" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - for i := 0; i < 5000; i++ { - key := "Z" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - return &Store{ - cache: cache, - unsortedCache: unsorted, - sortedCache: internal.NewBTree(), - } -} diff --git a/store/cachekv/search_test.go b/store/cachekv/search_test.go deleted file mode 100644 index 41321c076e..0000000000 --- a/store/cachekv/search_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package cachekv - -import "testing" - -func TestFindStartIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 8, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent but within >=start", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: -1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findStartIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} - -func TestFindEndIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: 
[]string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 7, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: -1, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: 4, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findEndIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} diff --git a/store/cachekv/store.go b/store/cachekv/store.go deleted file mode 100644 index 08cfc2b325..0000000000 --- a/store/cachekv/store.go +++ /dev/null @@ -1,408 +0,0 @@ -package cachekv - -import ( - "bytes" - "io" - "sort" - "sync" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/math" - "cosmossdk.io/store/cachekv/internal" - "cosmossdk.io/store/internal/conv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// cValue represents a cached value. -// If dirty is true, it indicates the cached value is different from the underlying value. -type cValue struct { - value []byte - dirty bool -} - -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - unsortedCache map[string]struct{} - sortedCache internal.BTree // always ascending sorted - parent types.KVStore -} - -var _ types.CacheKVStore = (*Store)(nil) - -// NewStore creates a new Store object -func NewStore(parent types.KVStore) *Store { - return &Store{ - cache: make(map[string]*cValue), - unsortedCache: make(map[string]struct{}), - sortedCache: internal.NewBTree(), - parent: parent, - } -} - -// GetStoreType implements Store. -func (store *Store) GetStoreType() types.StoreType { - return store.parent.GetStoreType() -} - -// Get implements types.KVStore. -func (store *Store) Get(key []byte) (value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() - - types.AssertValidKey(key) - - cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)] - if !ok { - value = store.parent.Get(key) - store.setCacheValue(key, value, false) - } else { - value = cacheValue.value - } - - return value -} - -// Set implements types.KVStore. -func (store *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - - store.mtx.Lock() - defer store.mtx.Unlock() - store.setCacheValue(key, value, true) -} - -// Has implements types.KVStore. -func (store *Store) Has(key []byte) bool { - value := store.Get(key) - return value != nil -} - -// Delete implements types.KVStore. 
-func (store *Store) Delete(key []byte) { - types.AssertValidKey(key) - - store.mtx.Lock() - defer store.mtx.Unlock() - - store.setCacheValue(key, nil, true) -} - -func (store *Store) resetCaches() { - if len(store.cache) > 100_000 { - // Cache is too large. We likely did something linear time - // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. - // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. - // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. - store.cache = make(map[string]*cValue) - store.unsortedCache = make(map[string]struct{}) - } else { - // Clear the cache using the map clearing idiom - // and not allocating fresh objects. - // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ - for key := range store.cache { - delete(store.cache, key) - } - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } - store.sortedCache = internal.NewBTree() -} - -// Implements Cachetypes.KVStore. -func (store *Store) Write() { - store.mtx.Lock() - defer store.mtx.Unlock() - - if len(store.cache) == 0 && len(store.unsortedCache) == 0 { - store.sortedCache = internal.NewBTree() - return - } - - type cEntry struct { - key string - val *cValue - } - - // We need a copy of all of the keys. - // Not the best. To reduce RAM pressure, we copy the values as well - // and clear out the old caches right after the copy. - sortedCache := make([]cEntry, 0, len(store.cache)) - - for key, dbValue := range store.cache { - if dbValue.dirty { - sortedCache = append(sortedCache, cEntry{key, dbValue}) - } - } - store.resetCaches() - sort.Slice(sortedCache, func(i, j int) bool { - return sortedCache[i].key < sortedCache[j].key - }) - - // TODO: Consider allowing usage of Batch, which would allow the write to - // at least happen atomically. - for _, obj := range sortedCache { - // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot - // be sure if the underlying store might do a save with the byteslice or - // not. Once we get confirmation that .Delete is guaranteed not to - // save the byteslice, then we can assume only a read-only copy is sufficient. - if obj.val.value != nil { - // It already exists in the parent, hence update it. - store.parent.Set([]byte(obj.key), obj.val.value) - } else { - store.parent.Delete([]byte(obj.key)) - } - } -} - -// CacheWrap implements CacheWrapper. -func (store *Store) CacheWrap() types.CacheWrap { - return NewStore(store) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return NewStore(tracekv.NewStore(store, w, tc)) -} - -//---------------------------------------- -// Iteration - -// Iterator implements types.KVStore. -func (store *Store) Iterator(start, end []byte) types.Iterator { - return store.iterator(start, end, true) -} - -// ReverseIterator implements types.KVStore. 
-func (store *Store) ReverseIterator(start, end []byte) types.Iterator { - return store.iterator(start, end, false) -} - -func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { - store.mtx.Lock() - defer store.mtx.Unlock() - - store.dirtyItems(start, end) - isoSortedCache := store.sortedCache.Copy() - - var ( - err error - parent, cache types.Iterator - ) - - if ascending { - parent = store.parent.Iterator(start, end) - cache, err = isoSortedCache.Iterator(start, end) - } else { - parent = store.parent.ReverseIterator(start, end) - cache, err = isoSortedCache.ReverseIterator(start, end) - } - if err != nil { - panic(err) - } - - return internal.NewCacheMergeIterator(parent, cache, ascending) -} - -func findStartIndex(strL []string, startQ string) int { - // Modified binary search to find the very first element in >=startQ. - if len(strL) == 0 { - return -1 - } - - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == startQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStL, that i+1 will be the first - // element >= midStr. - for i := mid - 1; i >= 0; i-- { - if strL[i] != midStr { - return i + 1 - } - } - return 0 - } - if midStr < startQ { - left = mid + 1 - } else { // midStrL > startQ - right = mid - 1 - } - } - if left >= 0 && left < len(strL) && strL[left] >= startQ { - return left - } - return -1 -} - -func findEndIndex(strL []string, endQ string) int { - if len(strL) == 0 { - return -1 - } - - // Modified binary search to find the very last element <= endQ. - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == endQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStL, that i+1 will be the first - // element >= midStr. - for i := mid - 1; i >= 0; i-- { - if strL[i] < midStr { - return i + 1 - } - } - return 0 - } - if midStr < endQ { - left = mid + 1 - } else { // midStrL > startQ - right = mid - 1 - } - } - - // Binary search failed, now let's find a value less than endQ. - for i := right; i >= 0; i-- { - if strL[i] < endQ { - return i - } - } - - return -1 -} - -type sortState int - -const ( - stateUnsorted sortState = iota - stateAlreadySorted -) - -const minSortSize = 1024 - -// Constructs a slice of dirty items, to use w/ memIterator. -func (store *Store) dirtyItems(start, end []byte) { - startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end) - if end != nil && startStr > endStr { - // Nothing to do here. - return - } - - n := len(store.unsortedCache) - unsorted := make([]*kv.Pair, 0) - // If the unsortedCache is too big, its costs too much to determine - // whats in the subset we are concerned about. - // If you are interleaving iterator calls with writes, this can easily become an - // O(N^2) overhead. - // Even without that, too many range checks eventually becomes more expensive - // than just not having the cache.
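// To summarize the two paths that follow: when fewer than minSortSize keys are
// dirty, a linear scan of unsortedCache is used; otherwise every dirty key is
// sorted once and the [start, end) window is located with the binary searches
// above (findStartIndex / findEndIndex), clearing at least minSortSize entries
// per call so the sorting cost is amortized.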
- if n < minSortSize { - for key := range store.unsortedCache { - // dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start - if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { - cacheValue := store.cache[key] - unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - } - store.clearUnsortedCacheSubset(unsorted, stateUnsorted) - return - } - - // Otherwise it is large so perform a modified binary search to find - // the target ranges for the keys that we should be looking for. - strL := make([]string, 0, n) - for key := range store.unsortedCache { - strL = append(strL, key) - } - sort.Strings(strL) - - // Now find the values within the domain - // [start, end) - startIndex := findStartIndex(strL, startStr) - if startIndex < 0 { - startIndex = 0 - } - - var endIndex int - if end == nil { - endIndex = len(strL) - 1 - } else { - endIndex = findEndIndex(strL, endStr) - } - if endIndex < 0 { - endIndex = len(strL) - 1 - } - - // Since we spent cycles to sort the values, we should process and remove a reasonable amount - // ensure start to end is at least minSortSize in size - // if below minSortSize, expand it to cover additional values - // this amortizes the cost of processing elements across multiple calls - if endIndex-startIndex < minSortSize { - endIndex = math.Min(startIndex+minSortSize, len(strL)-1) - if endIndex-startIndex < minSortSize { - startIndex = math.Max(endIndex-minSortSize, 0) - } - } - - kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex) - for i := startIndex; i <= endIndex; i++ { - key := strL[i] - cacheValue := store.cache[key] - kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - - // kvL was already sorted so pass it in as is. - store.clearUnsortedCacheSubset(kvL, stateAlreadySorted) -} - -func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { - n := len(store.unsortedCache) - if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } else { // Otherwise, normally delete the unsorted keys from the map. - for _, kv := range unsorted { - delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) - } - } - - if sortState == stateUnsorted { - sort.Slice(unsorted, func(i, j int) bool { - return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0 - }) - } - - for _, item := range unsorted { - // sortedCache is able to store `nil` value to represent deleted items. - store.sortedCache.Set(item.Key, item.Value) - } -} - -//---------------------------------------- -// etc - -// Only entrypoint to mutate store.cache. -// A `nil` value means a deletion. 
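// When dirty is true, the key is also recorded in unsortedCache so that a later
// Iterator / dirtyItems call can discover the buffered write and merge it into
// sortedCache.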
-func (store *Store) setCacheValue(key, value []byte, dirty bool) { - keyStr := conv.UnsafeBytesToStr(key) - store.cache[keyStr] = &cValue{ - value: value, - dirty: dirty, - } - if dirty { - store.unsortedCache[keyStr] = struct{}{} - } -} diff --git a/store/cachekv/store_bench_test.go b/store/cachekv/store_bench_test.go deleted file mode 100644 index 8f15855e09..0000000000 --- a/store/cachekv/store_bench_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package cachekv_test - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" -) - -var sink interface{} - -const defaultValueSizeBz = 1 << 12 - -// This benchmark measures the time of iterator.Next() when the parent store is blank -func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) { - b.Helper() - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - // Use a singleton for value, to not waste time computing it - value := randSlice(defaultValueSizeBz) - // Use simple values for keys, pick a random start, - // and take next b.N keys sequentially after.] - startKey := randSlice(32) - - // Add 1 to avoid issues when b.N = 1 - keys := generateSequentialKeys(startKey, b.N+1) - for _, k := range keys { - kvstore.Set(k, value) - } - - b.ReportAllocs() - b.ResetTimer() - - iter := kvstore.Iterator(keys[0], keys[b.N]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -// Benchmark setting New keys to a store, where the new keys are in sequence. -func benchmarkBlankParentAppend(b *testing.B, keysize int) { - b.Helper() - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - - // Use a singleton for value, to not waste time computing it - value := randSlice(32) - // Use simple values for keys, pick a random start, - // and take next b.N keys sequentially after. - startKey := randSlice(32) - - keys := generateSequentialKeys(startKey, b.N) - - b.ReportAllocs() - b.ResetTimer() - - for _, k := range keys { - kvstore.Set(k, value) - } -} - -// Benchmark setting New keys to a store, where the new keys are random. -// the speed of this function does not depend on the values in the parent store -func benchmarkRandomSet(b *testing.B, keysize int) { - b.Helper() - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - - // Use a singleton for value, to not waste time computing it - value := randSlice(defaultValueSizeBz) - // Add 1 to avoid issues when b.N = 1 - keys := generateRandomKeys(keysize, b.N+1) - - b.ReportAllocs() - b.ResetTimer() - - for _, k := range keys { - kvstore.Set(k, value) - } - - iter := kvstore.Iterator(keys[0], keys[b.N]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -// Benchmark creating an iterator on a parent with D entries, -// that are all deleted in the cacheKV store. -// We essentially are benchmarking the cacheKV iterator creation & iteration times -// with the number of entries deleted in the parent. -func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) { - b.Helper() - mem := dbadapter.Store{DB: dbm.NewMemDB()} - - // Use a singleton for value, to not waste time computing it - value := randSlice(32) - // Use simple values for keys, pick a random start, - // and take next D keys sequentially after. 
- startKey := randSlice(32) - // Add 1 to avoid issues when numDeletes = 1 - keys := generateSequentialKeys(startKey, numDeletes+1) - // setup parent db with D keys. - for _, k := range keys { - mem.Set(k, value) - } - kvstore := cachekv.NewStore(mem) - // Delete all keys from the cache KV store. - // The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design. - // Essentially the iterator will never be valid, in that it should never run. - // However, this is incompatible with the for loop structure the SDK uses, hence - // causes a panic. Thus we do keys[1:]. - for _, k := range keys[1:] { - kvstore.Delete(k) - } - - b.ReportAllocs() - b.ResetTimer() - - iter := kvstore.Iterator(keys[0], keys[numDeletes]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) { - benchmarkBlankParentIteratorNext(b, 32) -} - -func BenchmarkBlankParentAppendKeySize32(b *testing.B) { - benchmarkBlankParentAppend(b, 32) -} - -func BenchmarkSetKeySize32(b *testing.B) { - benchmarkRandomSet(b, 32) -} - -func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) { - benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000) -} diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go deleted file mode 100644 index 3c56223554..0000000000 --- a/store/cachekv/store_test.go +++ /dev/null @@ -1,694 +0,0 @@ -package cachekv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/math/unsafe" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/types" -) - -func newCacheKVStore() types.CacheKVStore { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - return cachekv.NewStore(mem) -} - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -func TestCacheKVStore(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - - // put something in mem and in cache - mem.Set(keyFmt(1), valFmt(1)) - st.Set(keyFmt(1), valFmt(1)) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - - // update it in cache, shoudn't change mem - st.Set(keyFmt(1), valFmt(2)) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) - - // write it. should change mem - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // more writes and checks - st.Write() - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one, check it - st = cachekv.NewStore(mem) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one and delete - should not be removed from mem - st = cachekv.NewStore(mem) - st.Delete(keyFmt(1)) - require.Empty(t, st.Get(keyFmt(1))) - require.Equal(t, mem.Get(keyFmt(1)), valFmt(2)) - - // Write. 
should now be removed from both - st.Write() - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty") -} - -func TestCacheKVStoreNoNilSet(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic") - require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") -} - -func TestCacheKVStoreNested(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - - // set. check its there on st and not on mem. - st.Set(keyFmt(1), valFmt(1)) - require.Empty(t, mem.Get(keyFmt(1))) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - - // make a new from st and check - st2 := cachekv.NewStore(st) - require.Equal(t, valFmt(1), st2.Get(keyFmt(1))) - - // update the value on st2, check it only effects st2 - st2.Set(keyFmt(1), valFmt(3)) - require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - require.Equal(t, valFmt(3), st2.Get(keyFmt(1))) - - // st2 writes to its parent, st. doesnt effect mem - st2.Write() - require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(3), st.Get(keyFmt(1))) - - // updates mem - st.Write() - require.Equal(t, valFmt(3), mem.Get(keyFmt(1))) -} - -func TestCacheKVIteratorBounds(t *testing.T) { - st := newCacheKVStore() - - // set some items - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - itr := st.Iterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, nItems, i) - require.NoError(t, itr.Close()) - - // iterate over none - itr = st.Iterator(bz("money"), nil) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - } - require.Equal(t, 0, i) - require.NoError(t, itr.Close()) - - // iterate over lower - itr = st.Iterator(keyFmt(0), keyFmt(3)) - i = 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, 3, i) - require.NoError(t, itr.Close()) - - // iterate over upper - itr = st.Iterator(keyFmt(2), keyFmt(4)) - i = 2 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, 4, i) - require.NoError(t, itr.Close()) -} - -func TestCacheKVReverseIteratorBounds(t *testing.T) { - st := newCacheKVStore() - - // set some items - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - itr := st.ReverseIterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(nItems-1-i), k) - require.Equal(t, valFmt(nItems-1-i), v) - i++ - } - require.Equal(t, nItems, i) - require.NoError(t, itr.Close()) - - // iterate over none - itr = st.ReverseIterator(bz("money"), nil) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - } - require.Equal(t, 0, i) - require.NoError(t, itr.Close()) - - // iterate over lower - end := 3 - itr = st.ReverseIterator(keyFmt(0), keyFmt(end)) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - k, v := itr.Key(), itr.Value() - 
require.Equal(t, keyFmt(end-i), k) - require.Equal(t, valFmt(end-i), v) - } - require.Equal(t, 3, i) - require.NoError(t, itr.Close()) - - // iterate over upper - end = 4 - itr = st.ReverseIterator(keyFmt(2), keyFmt(end)) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(end-i), k) - require.Equal(t, valFmt(end-i), v) - } - require.Equal(t, 2, i) - require.NoError(t, itr.Close()) -} - -func TestCacheKVMergeIteratorBasics(t *testing.T) { - st := newCacheKVStore() - - // set and delete an item in the cache, iterator should be empty - k, v := keyFmt(0), valFmt(0) - st.Set(k, v) - st.Delete(k) - assertIterateDomain(t, st, 0) - - // now set it and assert its there - st.Set(k, v) - assertIterateDomain(t, st, 1) - - // write it and assert its there - st.Write() - assertIterateDomain(t, st, 1) - - // remove it in cache and assert its not - st.Delete(k) - assertIterateDomain(t, st, 0) - - // write the delete and assert its not there - st.Write() - assertIterateDomain(t, st, 0) - - // add two keys and assert theyre there - k1, v1 := keyFmt(1), valFmt(1) - st.Set(k, v) - st.Set(k1, v1) - assertIterateDomain(t, st, 2) - - // write it and assert theyre there - st.Write() - assertIterateDomain(t, st, 2) - - // remove one in cache and assert its not - st.Delete(k1) - assertIterateDomain(t, st, 1) - - // write the delete and assert its not there - st.Write() - assertIterateDomain(t, st, 1) - - // delete the other key in cache and asserts its empty - st.Delete(k) - assertIterateDomain(t, st, 0) -} - -func TestCacheKVMergeIteratorDeleteLast(t *testing.T) { - st := newCacheKVStore() - - // set some items and write them - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - st.Write() - - // set some more items and leave dirty - for i := nItems; i < nItems*2; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - assertIterateDomain(t, st, nItems*2) - - // delete them all - for i := 0; i < nItems*2; i++ { - last := nItems*2 - 1 - i - st.Delete(keyFmt(last)) - assertIterateDomain(t, st, last) - } -} - -func TestCacheKVMergeIteratorDeletes(t *testing.T) { - st := newCacheKVStore() - truth := dbm.NewMemDB() - - // set some items and write them - nItems := 10 - for i := 0; i < nItems; i++ { - doOp(t, st, truth, opSet, i) - } - st.Write() - - // delete every other item, starting from 0 - for i := 0; i < nItems; i += 2 { - doOp(t, st, truth, opDel, i) - assertIterateDomainCompare(t, st, truth) - } - - // reset - st = newCacheKVStore() - truth = dbm.NewMemDB() - - // set some items and write them - for i := 0; i < nItems; i++ { - doOp(t, st, truth, opSet, i) - } - st.Write() - - // delete every other item, starting from 1 - for i := 1; i < nItems; i += 2 { - doOp(t, st, truth, opDel, i) - assertIterateDomainCompare(t, st, truth) - } -} - -func TestCacheKVMergeIteratorChunks(t *testing.T) { - st := newCacheKVStore() - - // Use the truth to check values on the merge iterator - truth := dbm.NewMemDB() - - // sets to the parent - setRange(t, st, truth, 0, 20) - setRange(t, st, truth, 40, 60) - st.Write() - - // sets to the cache - setRange(t, st, truth, 20, 40) - setRange(t, st, truth, 60, 80) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}}) - - // remove some parents and some cache - deleteRange(t, st, truth, 15, 25) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}}) - - // remove some parents and some cache - deleteRange(t, st, truth, 35, 45) - assertIterateDomainCheck(t, st, 
truth, []keyRange{{0, 15}, {25, 35}, {45, 80}}) - - // write, add more to the cache, and delete some cache - st.Write() - setRange(t, st, truth, 38, 42) - deleteRange(t, st, truth, 40, 43) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}}) -} - -func TestCacheKVMergeIteratorDomain(t *testing.T) { - st := newCacheKVStore() - - itr := st.Iterator(nil, nil) - start, end := itr.Domain() - require.Equal(t, start, end) - require.NoError(t, itr.Close()) - - itr = st.Iterator(keyFmt(40), keyFmt(60)) - start, end = itr.Domain() - require.Equal(t, keyFmt(40), start) - require.Equal(t, keyFmt(60), end) - require.NoError(t, itr.Close()) - - start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain() - require.Equal(t, keyFmt(0), start) - require.Equal(t, keyFmt(80), end) -} - -func TestCacheKVMergeIteratorRandom(t *testing.T) { - st := newCacheKVStore() - truth := dbm.NewMemDB() - - start, end := 25, 975 - max := 1000 - setRange(t, st, truth, start, end) - - // do an op, test the iterator - for i := 0; i < 2000; i++ { - doRandomOp(t, st, truth, max) - assertIterateDomainCompare(t, st, truth) - } -} - -func TestNilEndIterator(t *testing.T) { - const SIZE = 3000 - - tests := []struct { - name string - write bool - startIndex int - end []byte - }{ - {name: "write=false, end=nil", write: false, end: nil, startIndex: 1000}, - {name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000}, - {name: "write=true, end=nil", write: true, end: nil, startIndex: 1000}, - {name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - st := newCacheKVStore() - - for i := 0; i < SIZE; i++ { - kstr := keyFmt(i) - st.Set(kstr, valFmt(i)) - } - - if tt.write { - st.Write() - } - - itr := st.Iterator(keyFmt(tt.startIndex), tt.end) - i := tt.startIndex - j := 0 - for itr.Valid() { - require.Equal(t, keyFmt(i), itr.Key()) - require.Equal(t, valFmt(i), itr.Value()) - itr.Next() - i++ - j++ - } - - require.Equal(t, SIZE-tt.startIndex, j) - require.NoError(t, itr.Close()) - }) - } -} - -// TestIteratorDeadlock demonstrate the deadlock issue in cache store. -func TestIteratorDeadlock(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - store := cachekv.NewStore(mem) - // the channel buffer is 64 and received once, so put at least 66 elements. - for i := 0; i < 66; i++ { - store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1}) - } - it := store.Iterator(nil, nil) - defer it.Close() - store.Set([]byte("key20"), []byte{1}) - // it'll be blocked here with previous version, or enable lock on btree. 
- it2 := store.Iterator(nil, nil) - defer it2.Close() -} - -//------------------------------------------------------------------------------------------- -// do some random ops - -const ( - opSet = 0 - opSetRange = 1 - opDel = 2 - opDelRange = 3 - opWrite = 4 - - totalOps = 5 // number of possible operations -) - -func randInt(n int) int { - return unsafe.NewRand().Int() % n -} - -// useful for replaying a error case if we find one -func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) { - t.Helper() - switch op { - case opSet: - k := args[0] - st.Set(keyFmt(k), valFmt(k)) - err := truth.Set(keyFmt(k), valFmt(k)) - require.NoError(t, err) - case opSetRange: - start := args[0] - end := args[1] - setRange(t, st, truth, start, end) - case opDel: - k := args[0] - st.Delete(keyFmt(k)) - err := truth.Delete(keyFmt(k)) - require.NoError(t, err) - case opDelRange: - start := args[0] - end := args[1] - deleteRange(t, st, truth, start, end) - case opWrite: - st.Write() - } -} - -func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) { - t.Helper() - r := randInt(totalOps) - switch r { - case opSet: - k := randInt(maxKey) - st.Set(keyFmt(k), valFmt(k)) - err := truth.Set(keyFmt(k), valFmt(k)) - require.NoError(t, err) - case opSetRange: - start := randInt(maxKey - 2) - end := randInt(maxKey-start) + start - setRange(t, st, truth, start, end) - case opDel: - k := randInt(maxKey) - st.Delete(keyFmt(k)) - err := truth.Delete(keyFmt(k)) - require.NoError(t, err) - case opDelRange: - start := randInt(maxKey - 2) - end := randInt(maxKey-start) + start - deleteRange(t, st, truth, start, end) - case opWrite: - st.Write() - } -} - -//------------------------------------------------------------------------------------------- - -// iterate over whole domain -func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) { - t.Helper() - itr := st.Iterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, expectedN, i) - require.NoError(t, itr.Close()) -} - -func assertIterateDomainCheck(t *testing.T, st types.KVStore, mem dbm.DB, r []keyRange) { - t.Helper() - // iterate over each and check they match the other - itr := st.Iterator(nil, nil) - itr2, err := mem.Iterator(nil, nil) // ground truth - require.NoError(t, err) - - krc := newKeyRangeCounter(r) - i := 0 - - for ; krc.valid(); krc.next() { - require.True(t, itr.Valid()) - require.True(t, itr2.Valid()) - - // check the key/val matches the ground truth - k, v := itr.Key(), itr.Value() - k2, v2 := itr2.Key(), itr2.Value() - require.Equal(t, k, k2) - require.Equal(t, v, v2) - - // check they match the counter - require.Equal(t, k, keyFmt(krc.key())) - - itr.Next() - itr2.Next() - i++ - } - - require.False(t, itr.Valid()) - require.False(t, itr2.Valid()) - require.NoError(t, itr.Close()) - require.NoError(t, itr2.Close()) -} - -func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) { - t.Helper() - // iterate over each and check they match the other - itr := st.Iterator(nil, nil) - itr2, err := mem.Iterator(nil, nil) // ground truth - require.NoError(t, err) - checkIterators(t, itr, itr2) - checkIterators(t, itr2, itr) - require.NoError(t, itr.Close()) - require.NoError(t, itr2.Close()) -} - -func checkIterators(t *testing.T, itr, itr2 types.Iterator) { - t.Helper() - for ; itr.Valid(); itr.Next() { - require.True(t, itr2.Valid()) - k, v := 
itr.Key(), itr.Value() - k2, v2 := itr2.Key(), itr2.Value() - require.Equal(t, k, k2) - require.Equal(t, v, v2) - itr2.Next() - } - require.False(t, itr.Valid()) - require.False(t, itr2.Valid()) -} - -//-------------------------------------------------------- - -func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - t.Helper() - for i := start; i < end; i++ { - st.Set(keyFmt(i), valFmt(i)) - err := mem.Set(keyFmt(i), valFmt(i)) - require.NoError(t, err) - } -} - -func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - t.Helper() - for i := start; i < end; i++ { - st.Delete(keyFmt(i)) - err := mem.Delete(keyFmt(i)) - require.NoError(t, err) - } -} - -//-------------------------------------------------------- - -type keyRange struct { - start int - end int -} - -func (kr keyRange) len() int { - return kr.end - kr.start -} - -func newKeyRangeCounter(kr []keyRange) *keyRangeCounter { - return &keyRangeCounter{keyRanges: kr} -} - -// we can iterate over this and make sure our real iterators have all the right keys -type keyRangeCounter struct { - rangeIdx int - idx int - keyRanges []keyRange -} - -func (krc *keyRangeCounter) valid() bool { - maxRangeIdx := len(krc.keyRanges) - 1 - maxRange := krc.keyRanges[maxRangeIdx] - - // if we're not in the max range, we're valid - if krc.rangeIdx <= maxRangeIdx && - krc.idx < maxRange.len() { - return true - } - - return false -} - -func (krc *keyRangeCounter) next() { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - if krc.idx == thisKeyRange.len()-1 { - krc.rangeIdx++ - krc.idx = 0 - } else { - krc.idx++ - } -} - -func (krc *keyRangeCounter) key() int { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - return thisKeyRange.start + krc.idx -} - -//-------------------------------------------------------- - -func bz(s string) []byte { return []byte(s) } - -func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} - -func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - for i := 0; i < b.N; i++ { - arr := []byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)} - st.Set(arr, arr) - } - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go deleted file mode 100644 index 696911370c..0000000000 --- a/store/cachemulti/store.go +++ /dev/null @@ -1,170 +0,0 @@ -package cachemulti - -import ( - "fmt" - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// storeNameCtxKey is the TraceContext metadata key that identifies -// the store which emitted a given trace. -const storeNameCtxKey = "store_name" - -//---------------------------------------- -// Store - -// Store holds many branched stores. -// Implements MultiStore. -// NOTE: a Store (and MultiStores in general) should never expose the -// keys for the substores. 
-type Store struct { - db types.CacheKVStore - stores map[types.StoreKey]types.CacheWrap - keys map[string]types.StoreKey - - traceWriter io.Writer - traceContext types.TraceContext -} - -var _ types.CacheMultiStore = Store{} - -// NewFromKVStore creates a new Store object from a mapping of store keys to -// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store -// is a branched store. -func NewFromKVStore( - store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, - keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, -) Store { - cms := Store{ - db: cachekv.NewStore(store), - stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), - keys: keys, - traceWriter: traceWriter, - traceContext: traceContext, - } - - for key, store := range stores { - if cms.TracingEnabled() { - tctx := cms.traceContext.Clone().Merge(types.TraceContext{ - storeNameCtxKey: key.Name(), - }) - - store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) - } - cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) - } - - return cms -} - -// NewStore creates a new Store object from a mapping of store keys to -// CacheWrapper objects. Each CacheWrapper store is a branched store. -func NewStore( - db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, - traceWriter io.Writer, traceContext types.TraceContext, -) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) -} - -func newCacheMultiStoreFromCMS(cms Store) Store { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range cms.stores { - stores[k] = v - } - - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) -} - -// SetTracer sets the tracer for the MultiStore that the underlying -// stores will utilize to trace operations. A MultiStore is returned. -func (cms Store) SetTracer(w io.Writer) types.MultiStore { - cms.traceWriter = w - return cms -} - -// SetTracingContext updates the tracing context for the MultiStore by merging -// the given context with the existing context by key. Any existing keys will -// be overwritten. It is implied that the caller should update the context when -// necessary between tracing operations. It returns a modified MultiStore. -func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore { - if cms.traceContext != nil { - for k, v := range tc { - cms.traceContext[k] = v - } - } else { - cms.traceContext = tc - } - - return cms -} - -// TracingEnabled returns if tracing is enabled for the MultiStore. -func (cms Store) TracingEnabled() bool { - return cms.traceWriter != nil -} - -// LatestVersion returns the branch version of the store -func (cms Store) LatestVersion() int64 { - panic("cannot get latest version from branch cached multi-store") -} - -// GetStoreType returns the type of the store. -func (cms Store) GetStoreType() types.StoreType { - return types.StoreTypeMulti -} - -// Write calls Write on each underlying store. -func (cms Store) Write() { - cms.db.Write() - for _, store := range cms.stores { - store.Write() - } -} - -// Implements CacheWrapper. -func (cms Store) CacheWrap() types.CacheWrap { - return cms.CacheMultiStore().(types.CacheWrap) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return cms.CacheWrap() -} - -// Implements MultiStore. 
-func (cms Store) CacheMultiStore() types.CacheMultiStore { - return newCacheMultiStoreFromCMS(cms) -} - -// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic -// as an already cached multi-store cannot load previous versions. -// -// TODO: The store implementation can possibly be modified to support this as it -// seems safe to load previous versions (heights). -func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { - panic("cannot branch cached multi-store with a version") -} - -// GetStore returns an underlying Store by key. -func (cms Store) GetStore(key types.StoreKey) types.Store { - s := cms.stores[key] - if key == nil || s == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return s.(types.Store) -} - -// GetKVStore returns an underlying KVStore by key. -func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { - store := cms.stores[key] - if key == nil || store == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return store.(types.KVStore) -} diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go deleted file mode 100644 index 0ea7785bff..0000000000 --- a/store/cachemulti/store_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package cachemulti - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestStoreGetKVStore(t *testing.T) { - require := require.New(t) - - s := Store{stores: map[types.StoreKey]types.CacheWrap{}} - key := types.NewKVStoreKey("abc") - errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key) - - require.PanicsWithValue(errMsg, - func() { s.GetStore(key) }) - - require.PanicsWithValue(errMsg, - func() { s.GetKVStore(key) }) -} diff --git a/store/changeset.go b/store/changeset.go new file mode 100644 index 0000000000..182d8d5c54 --- /dev/null +++ b/store/changeset.go @@ -0,0 +1,39 @@ +package store + +// KVPair defines a key-value pair with additional metadata that is used to +// track writes. Deletion can be denoted by a nil value or explicitly by the +// Delete field. +type KVPair struct { + Key []byte + Value []byte + StoreKey string // optional +} + +// Changeset defines a set of KVPair entries. +type Changeset struct { + Pairs []KVPair +} + +func NewChangeset(pairs ...KVPair) *Changeset { + return &Changeset{ + Pairs: pairs, + } +} + +// Size returns the number of key-value pairs in the batch. +func (cs *Changeset) Size() int { + return len(cs.Pairs) +} + +// Add adds a key-value pair to the ChangeSet. +func (cs *Changeset) Add(key, value []byte) { + cs.Pairs = append(cs.Pairs, KVPair{ + Key: key, + Value: value, + }) +} + +// AddKVPair adds a KVPair to the ChangeSet. +func (cs *Changeset) AddKVPair(pair KVPair) { + cs.Pairs = append(cs.Pairs, pair) +} diff --git a/store/commit_info.go b/store/commit_info.go new file mode 100644 index 0000000000..103118ece0 --- /dev/null +++ b/store/commit_info.go @@ -0,0 +1,82 @@ +package store + +import ( + "fmt" + "time" + + "cosmossdk.io/store/v2/internal/maps" +) + +type ( + // CommitHeader defines the interface for a block header that can be provided + // to a MultiStore upon Commit. This should be optional and used to facilitate + // time-based queries only. + CommitHeader interface { + GetTime() time.Time + GetHeight() uint64 + } + + // CommitInfo defines commit information used by the multi-store when committing + // a version/height. 
+ CommitInfo struct { + Version uint64 + StoreInfos []StoreInfo + Timestamp time.Time + } + + // StoreInfo defines store-specific commit information. It contains a reference + // between a store name/key and the commit ID. + StoreInfo struct { + Name string + CommitID CommitID + } + + // CommitID defines the commitment information when a specific store is + // committed. + CommitID struct { + Version uint64 + Hash []byte + } +) + +func (si StoreInfo) GetHash() []byte { + return si.CommitID.Hash +} + +// Hash returns the root hash of all committed stores represented by CommitInfo, +// sorted by store name/key. +func (ci CommitInfo) Hash() []byte { + if len(ci.StoreInfos) == 0 { + return nil + } + + rootHash, _, _ := maps.ProofsFromMap(ci.toMap()) + return rootHash +} + +func (ci CommitInfo) toMap() map[string][]byte { + m := make(map[string][]byte, len(ci.StoreInfos)) + for _, storeInfo := range ci.StoreInfos { + m[storeInfo.Name] = storeInfo.GetHash() + } + + return m +} + +func (ci CommitInfo) CommitID() CommitID { + return CommitID{ + Version: ci.Version, + Hash: ci.Hash(), + } +} + +func (m *CommitInfo) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +func (cid CommitID) String() string { + return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) +} diff --git a/store/commitment/README.md b/store/commitment/README.md new file mode 100644 index 0000000000..4843ef0db5 --- /dev/null +++ b/store/commitment/README.md @@ -0,0 +1,3 @@ +# State Commitment (SC) + +TODO diff --git a/store/commitment/db.go b/store/commitment/db.go new file mode 100644 index 0000000000..85da3e5de0 --- /dev/null +++ b/store/commitment/db.go @@ -0,0 +1,80 @@ +package commitment + +import ( + "sync" + + ics23 "github.com/cosmos/ics23/go" + + "cosmossdk.io/store/v2" +) + +// Database represents a state commitment store. It is designed to securely store +// and manage the most recent state information, crucial for achieving consensus. +// Each module creates its own instance of Database for managing its specific state. +type Database struct { + mu sync.Mutex + tree store.Tree +} + +// NewDatabase creates a new Database instance. +func NewDatabase(tree store.Tree) *Database { + return &Database{ + tree: tree, + } +} + +// WriteBatch writes a batch of key-value pairs to the database. +func (db *Database) WriteBatch(cs *store.Changeset) error { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.WriteBatch(cs) +} + +// WorkingHash returns the working hash of the database. +func (db *Database) WorkingHash() []byte { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.WorkingHash() +} + +// LoadVersion loads the state at the given version. +func (db *Database) LoadVersion(version uint64) error { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.LoadVersion(version) +} + +// Commit commits the current state to the database. +func (db *Database) Commit() ([]byte, error) { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.Commit() +} + +// GetProof returns a proof for the given key and version. +func (db *Database) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.GetProof(version, key) +} + +// GetLatestVersion returns the latest version of the database. +func (db *Database) GetLatestVersion() uint64 { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.GetLatestVersion() +} + +// Close closes the database and releases all resources. 
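For orientation, a minimal usage sketch (not part of the changeset itself) of how the commitment Database above composes with the IAVL tree and Changeset types introduced in this diff; the wiring mirrors the accompanying db_test.go, and the printed output is illustrative only.

package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/commitment"
	"cosmossdk.io/store/v2/commitment/iavl"
)

func main() {
	// Back the state-commitment layer with an in-memory IAVL tree.
	tree := iavl.NewIavlTree(dbm.NewMemDB(), log.NewNopLogger(), iavl.DefaultConfig())
	scDB := commitment.NewDatabase(tree)

	// Stage writes as a Changeset and hand the whole batch to the tree.
	// (A nil value in a KVPair denotes a deletion of an existing key.)
	cs := store.NewChangeset()
	cs.Add([]byte("key1"), []byte("value1"))
	cs.Add([]byte("key2"), []byte("value2"))
	if err := scDB.WriteBatch(cs); err != nil {
		panic(err)
	}

	// WorkingHash reflects the staged state; Commit persists a new version.
	fmt.Printf("working hash: %X\n", scDB.WorkingHash())

	hash, err := scDB.Commit()
	if err != nil {
		panic(err)
	}
	fmt.Printf("committed %X at version %d\n", hash, scDB.GetLatestVersion())
}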
+func (db *Database) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + + return db.tree.Close() +} diff --git a/store/commitment/db_test.go b/store/commitment/db_test.go new file mode 100644 index 0000000000..546ed1df01 --- /dev/null +++ b/store/commitment/db_test.go @@ -0,0 +1,84 @@ +package commitment + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/commitment/iavl" +) + +func generateTree(treeType string) store.Tree { + if treeType == "iavl" { + cfg := iavl.DefaultConfig() + db := dbm.NewMemDB() + tree := iavl.NewIavlTree(db, log.NewNopLogger(), cfg) + + return tree + } + + return nil +} + +func TestIavlTree(t *testing.T) { + // generate a new tree + tree := generateTree("iavl") + require.NotNil(t, tree) + + initVersion := tree.GetLatestVersion() + require.Equal(t, uint64(0), initVersion) + + // write a batch of version 1 + cs1 := store.NewChangeset() + cs1.Add([]byte("key1"), []byte("value1")) + cs1.Add([]byte("key2"), []byte("value2")) + cs1.Add([]byte("key3"), []byte("value3")) + + err := tree.WriteBatch(cs1) + require.NoError(t, err) + + workingHash := tree.WorkingHash() + require.NotNil(t, workingHash) + require.Equal(t, uint64(0), tree.GetLatestVersion()) + + // commit the batch + commitHash, err := tree.Commit() + require.NoError(t, err) + require.Equal(t, workingHash, commitHash) + require.Equal(t, uint64(1), tree.GetLatestVersion()) + version1Hash := tree.WorkingHash() + + // write a batch of version 2 + cs2 := store.NewChangeset() + cs2.Add([]byte("key4"), []byte("value4")) + cs2.Add([]byte("key5"), []byte("value5")) + cs2.Add([]byte("key6"), []byte("value6")) + cs2.Add([]byte("key1"), nil) // delete key1 + err = tree.WriteBatch(cs2) + require.NoError(t, err) + workingHash = tree.WorkingHash() + require.NotNil(t, workingHash) + commitHash, err = tree.Commit() + require.NoError(t, err) + require.Equal(t, workingHash, commitHash) + + // get proof for key1 + proof, err := tree.GetProof(1, []byte("key1")) + require.NoError(t, err) + require.NotNil(t, proof.GetExist()) + + proof, err = tree.GetProof(2, []byte("key1")) + require.NoError(t, err) + require.NotNil(t, proof.GetNonexist()) + + // load version 1 + err = tree.LoadVersion(1) + require.NoError(t, err) + require.Equal(t, version1Hash, tree.WorkingHash()) + + // close the db + require.NoError(t, tree.Close()) +} diff --git a/store/commitment/iavl/config.go b/store/commitment/iavl/config.go new file mode 100644 index 0000000000..7e386b3a46 --- /dev/null +++ b/store/commitment/iavl/config.go @@ -0,0 +1,15 @@ +package iavl + +// Config is the configuration for the IAVL tree. +type Config struct { + CacheSize int `mapstructure:"cache_size"` + SkipFastStorageUpgrade bool `mapstructure:"skip_fast_storage_upgrade"` +} + +// DefaultConfig returns the default configuration for the IAVL tree. +func DefaultConfig() *Config { + return &Config{ + CacheSize: 1000, + SkipFastStorageUpgrade: false, + } +} diff --git a/store/commitment/iavl/tree.go b/store/commitment/iavl/tree.go new file mode 100644 index 0000000000..671e60b128 --- /dev/null +++ b/store/commitment/iavl/tree.go @@ -0,0 +1,84 @@ +package iavl + +import ( + "fmt" + + dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/iavl" + ics23 "github.com/cosmos/ics23/go" + + log "cosmossdk.io/log" + "cosmossdk.io/store/v2" +) + +var _ store.Tree = (*IavlTree)(nil) + +// IavlTree is a wrapper around iavl.MutableTree. 
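A short sketch, with values that are purely illustrative assumptions rather than tuning advice, of overriding the Config above instead of taking DefaultConfig:

package example

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/v2/commitment/iavl"
)

// newTunedTree builds an IAVL tree with a hand-rolled Config; the field values
// here are assumptions for illustration only.
func newTunedTree() *iavl.IavlTree {
	cfg := &iavl.Config{
		CacheSize:              10000, // larger node cache than the 1000 default
		SkipFastStorageUpgrade: true,  // skip the fast-storage migration pass
	}
	return iavl.NewIavlTree(dbm.NewMemDB(), log.NewNopLogger(), cfg)
}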
+type IavlTree struct { + tree *iavl.MutableTree +} + +// NewIavlTree creates a new IavlTree instance. +func NewIavlTree(db dbm.DB, logger log.Logger, cfg *Config) *IavlTree { + tree := iavl.NewMutableTree(db, cfg.CacheSize, cfg.SkipFastStorageUpgrade, logger) + return &IavlTree{ + tree: tree, + } +} + +// WriteBatch writes a batch of key-value pairs to the database. +func (t *IavlTree) WriteBatch(cs *store.Changeset) error { + for _, kv := range cs.Pairs { + if kv.Value == nil { + _, res, err := t.tree.Remove(kv.Key) + if err != nil { + return err + } + if !res { + return fmt.Errorf("failed to delete key %X", kv.Key) + } + } else { + _, err := t.tree.Set(kv.Key, kv.Value) + if err != nil { + return err + } + } + } + return nil +} + +// WorkingHash returns the working hash of the database. +func (t *IavlTree) WorkingHash() []byte { + return t.tree.WorkingHash() +} + +// LoadVersion loads the state at the given version. +func (t *IavlTree) LoadVersion(version uint64) error { + return t.tree.LoadVersionForOverwriting(int64(version)) +} + +// Commit commits the current state to the database. +func (t *IavlTree) Commit() ([]byte, error) { + hash, _, err := t.tree.SaveVersion() + return hash, err +} + +// GetProof returns a proof for the given key and version. +func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { + imutableTree, err := t.tree.GetImmutable(int64(version)) + if err != nil { + return nil, err + } + + return imutableTree.GetProof(key) +} + +// GetLatestVersion returns the latest version of the database. +func (t *IavlTree) GetLatestVersion() uint64 { + return uint64(t.tree.Version()) +} + +// Close closes the iavl tree. +func (t *IavlTree) Close() error { + return nil +} diff --git a/store/database.go b/store/database.go new file mode 100644 index 0000000000..df5ad19706 --- /dev/null +++ b/store/database.go @@ -0,0 +1,69 @@ +package store + +import ( + "io" +) + +// Reader wraps the Has and Get method of a backing data store. +type Reader interface { + // Has retrieves if a key is present in the key-value data store. + // + // Note: is safe to modify and read after calling Has. + Has(storeKey string, key []byte) (bool, error) + + // Get retrieves the given key if it's present in the key-value data store. + // + // Note: is safe to modify and read after calling Get. + // The returned byte slice is safe to read, but cannot be modified. + Get(storeKey string, key []byte) ([]byte, error) +} + +// Writer wraps the Set method of a backing data store. +type Writer interface { + // Set inserts the given value into the key-value data store. + // + // Note: are safe to modify and read after calling Set. + Set(storeKey string, key, value []byte) error + + // Delete removes the key from the backing key-value data store. + // + // Note: is safe to modify and read after calling Delete. + Delete(storeKey string, key []byte) error +} + +// Database contains all the methods required to allow handling different +// key-value data stores backing the database. +type Database interface { + Reader + Writer + IteratorCreator + io.Closer +} + +// VersionedDatabase defines an API for a versioned database that allows reads, +// writes, iteration and commitment over a series of versions. 
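For reference, the store.Tree contract that IavlTree satisfies, reconstructed as an assumption from the calls made in commitment/db.go and the methods implemented above; the canonical interface lives elsewhere in this changeset and may differ in detail.

package example

import (
	ics23 "github.com/cosmos/ics23/go"

	"cosmossdk.io/store/v2"
)

// Tree approximates the store.Tree interface as this diff exercises it; treat
// the exact shape as inferred rather than authoritative.
type Tree interface {
	WriteBatch(cs *store.Changeset) error
	WorkingHash() []byte
	LoadVersion(version uint64) error
	Commit() ([]byte, error)
	GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error)
	GetLatestVersion() uint64
	Close() error
}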
+type VersionedDatabase interface { + Has(storeKey string, version uint64, key []byte) (bool, error) + Get(storeKey string, version uint64, key []byte) ([]byte, error) + GetLatestVersion() (uint64, error) + SetLatestVersion(version uint64) error + + Iterator(storeKey string, version uint64, start, end []byte) (Iterator, error) + ReverseIterator(storeKey string, version uint64, start, end []byte) (Iterator, error) + + ApplyChangeset(version uint64, cs *Changeset) error + + // Prune attempts to prune all versions up to and including the provided + // version argument. The operation should be idempotent. An error should be + // returned upon failure. + Prune(version uint64) error + + // Close releases associated resources. It should NOT be idempotent. It must + // only be called once and any call after may panic. + io.Closer +} + +// Committer defines a contract for committing state. +type Committer interface { + Commit() error +} diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go deleted file mode 100644 index 013e26df20..0000000000 --- a/store/dbadapter/store.go +++ /dev/null @@ -1,90 +0,0 @@ -package dbadapter - -import ( - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// Wrapper type for dbm.Db with implementation of KVStore -type Store struct { - dbm.DB -} - -// Get wraps the underlying DB's Get method panicing on error. -func (dsa Store) Get(key []byte) []byte { - v, err := dsa.DB.Get(key) - if err != nil { - panic(err) - } - - return v -} - -// Has wraps the underlying DB's Has method panicing on error. -func (dsa Store) Has(key []byte) bool { - ok, err := dsa.DB.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -// Set wraps the underlying DB's Set method panicing on error. -func (dsa Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - if err := dsa.DB.Set(key, value); err != nil { - panic(err) - } -} - -// Delete wraps the underlying DB's Delete method panicing on error. -func (dsa Store) Delete(key []byte) { - if err := dsa.DB.Delete(key); err != nil { - panic(err) - } -} - -// Iterator wraps the underlying DB's Iterator method panicing on error. -func (dsa Store) Iterator(start, end []byte) types.Iterator { - iter, err := dsa.DB.Iterator(start, end) - if err != nil { - panic(err) - } - - return iter -} - -// ReverseIterator wraps the underlying DB's ReverseIterator method panicing on error. -func (dsa Store) ReverseIterator(start, end []byte) types.Iterator { - iter, err := dsa.DB.ReverseIterator(start, end) - if err != nil { - panic(err) - } - - return iter -} - -// GetStoreType returns the type of the store. -func (Store) GetStoreType() types.StoreType { - return types.StoreTypeDB -} - -// CacheWrap branches the underlying store. -func (dsa Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(dsa) -} - -// CacheWrapWithTrace implements KVStore. -func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) -} - -// dbm.DB implements KVStore so we can CacheKVStore it. 
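A hedged sketch of how a caller might drive the VersionedDatabase interface above; the helper name and the ApplyChangeset-then-SetLatestVersion ordering are assumptions made for illustration, not behavior this diff prescribes.

package example

import "cosmossdk.io/store/v2"

// commitAndRead applies a changeset at a version, records that version as the
// latest, and reads one key back at the same version. Error handling is
// deliberately minimal; the flow itself is the point.
func commitAndRead(ss store.VersionedDatabase, version uint64, cs *store.Changeset, storeKey string, key []byte) ([]byte, error) {
	if err := ss.ApplyChangeset(version, cs); err != nil {
		return nil, err
	}
	if err := ss.SetLatestVersion(version); err != nil {
		return nil, err
	}
	return ss.Get(storeKey, version, key)
}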
-var _ types.KVStore = Store{} diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go deleted file mode 100644 index 9685887f91..0000000000 --- a/store/dbadapter/store_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package dbadapter_test - -import ( - "bytes" - "errors" - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/mock" - "cosmossdk.io/store/types" -) - -var errFoo = errors.New("dummy") - -func TestAccessors(t *testing.T) { - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockDB := mock.NewMockDB(mockCtrl) - store := dbadapter.Store{mockDB} - key := []byte("test") - value := []byte("testvalue") - - require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Equal(t, types.StoreTypeDB, store.GetStoreType()) - store.GetStoreType() - - retFoo := []byte("xxx") - mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil) - require.True(t, bytes.Equal(retFoo, store.Get(key))) - - mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.Get(key) }) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil) - require.True(t, store.Has(key)) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil) - require.False(t, store.Has(key)) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo) - require.Panics(t, func() { store.Has(key) }) - - mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil) - require.NotPanics(t, func() { store.Set(key, value) }) - - mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo) - require.Panics(t, func() { store.Set(key, value) }) - - mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil) - require.NotPanics(t, func() { store.Delete(key) }) - - mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo) - require.Panics(t, func() { store.Delete(key) }) - - start, end := []byte("start"), []byte("end") - mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) - require.NotPanics(t, func() { store.Iterator(start, end) }) - - mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.Iterator(start, end) }) - - mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) - require.NotPanics(t, func() { store.ReverseIterator(start, end) }) - - mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.ReverseIterator(start, end) }) -} - -func TestCacheWraps(t *testing.T) { - mockCtrl := gomock.NewController(t) - mockDB := mock.NewMockDB(mockCtrl) - store := dbadapter.Store{mockDB} - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} diff --git a/store/types/errors.go b/store/errors.go similarity index 65% rename from store/types/errors.go rename to store/errors.go index db86a3cc65..4563be7a29 100644 --- a/store/types/errors.go +++ b/store/errors.go @@ -1,14 +1,16 @@ -package types +package store import ( "cosmossdk.io/errors" ) +// StoreCodespace defines the store package's 
unique error code space. const StoreCodespace = "store" var ( // ErrInvalidProof is returned when a proof is invalid ErrInvalidProof = errors.Register(StoreCodespace, 2, "invalid proof") + // ErrTxDecode is returned if we cannot parse a transaction ErrTxDecode = errors.Register(StoreCodespace, 3, "tx parse error") @@ -22,7 +24,15 @@ var ( // ErrConflict defines a conflict error, e.g. when two goroutines try to access // the same resource and one of them fails. ErrConflict = errors.Register(StoreCodespace, 6, "conflict") + // ErrInvalidRequest defines an ABCI typed error where the request contains // invalid data. ErrInvalidRequest = errors.Register(StoreCodespace, 7, "invalid request") + + ErrClosed = errors.Register(StoreCodespace, 8, "closed") + ErrRecordNotFound = errors.Register(StoreCodespace, 9, "record not found") + ErrUnknownStoreKey = errors.Register(StoreCodespace, 10, "unknown store key") + ErrInvalidVersion = errors.Register(StoreCodespace, 11, "invalid version") + ErrKeyEmpty = errors.Register(StoreCodespace, 12, "key empty") + ErrStartAfterEnd = errors.Register(StoreCodespace, 13, "start key after end key") ) diff --git a/store/gaskv/store.go b/store/gaskv/store.go deleted file mode 100644 index e0f96af715..0000000000 --- a/store/gaskv/store.go +++ /dev/null @@ -1,176 +0,0 @@ -package gaskv - -import ( - "io" - - "cosmossdk.io/store/types" -) - -var _ types.KVStore = &Store{} - -// Store applies gas tracking to an underlying KVStore. It implements the -// KVStore interface. -type Store struct { - gasMeter types.GasMeter - gasConfig types.GasConfig - parent types.KVStore -} - -// NewStore returns a reference to a new GasKVStore. -func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { - kvs := &Store{ - gasMeter: gasMeter, - gasConfig: gasConfig, - parent: parent, - } - return kvs -} - -// Implements Store. -func (gs *Store) GetStoreType() types.StoreType { - return gs.parent.GetStoreType() -} - -// Implements KVStore. -func (gs *Store) Get(key []byte) (value []byte) { - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) - value = gs.parent.Get(key) - - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) - - return value -} - -// Implements KVStore. -func (gs *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) - gs.parent.Set(key, value) -} - -// Implements KVStore. -func (gs *Store) Has(key []byte) bool { - gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc) - return gs.parent.Has(key) -} - -// Implements KVStore. -func (gs *Store) Delete(key []byte) { - // charge gas to prevent certain attack vectors even though space is being freed - gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc) - gs.parent.Delete(key) -} - -// Iterator implements the KVStore interface. It returns an iterator which -// incurs a flat gas cost for seeking to the first key/value pair and a variable -// gas cost based on the current value's length if the iterator is valid. 
-func (gs *Store) Iterator(start, end []byte) types.Iterator { - return gs.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It returns a reverse -// iterator which incurs a flat gas cost for seeking to the first key/value pair -// and a variable gas cost based on the current value's length if the iterator -// is valid. -func (gs *Store) ReverseIterator(start, end []byte) types.Iterator { - return gs.iterator(start, end, false) -} - -// Implements KVStore. -func (gs *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a GasKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. -func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a GasKVStore") -} - -func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator - if ascending { - parent = gs.parent.Iterator(start, end) - } else { - parent = gs.parent.ReverseIterator(start, end) - } - - gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent) - gi.(*gasIterator).consumeSeekGas() - - return gi -} - -type gasIterator struct { - gasMeter types.GasMeter - gasConfig types.GasConfig - parent types.Iterator -} - -func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator { - return &gasIterator{ - gasMeter: gasMeter, - gasConfig: gasConfig, - parent: parent, - } -} - -// Implements Iterator. -func (gi *gasIterator) Domain() (start, end []byte) { - return gi.parent.Domain() -} - -// Implements Iterator. -func (gi *gasIterator) Valid() bool { - return gi.parent.Valid() -} - -// Next implements the Iterator interface. It seeks to the next key/value pair -// in the iterator. It incurs a flat gas cost for seeking and a variable gas -// cost based on the current value's length if the iterator is valid. -func (gi *gasIterator) Next() { - gi.consumeSeekGas() - gi.parent.Next() -} - -// Key implements the Iterator interface. It returns the current key and it does -// not incur any gas cost. -func (gi *gasIterator) Key() (key []byte) { - key = gi.parent.Key() - return key -} - -// Value implements the Iterator interface. It returns the current value and it -// does not incur any gas cost. -func (gi *gasIterator) Value() (value []byte) { - value = gi.parent.Value() - return value -} - -// Implements Iterator. -func (gi *gasIterator) Close() error { - return gi.parent.Close() -} - -// Error delegates the Error call to the parent iterator. -func (gi *gasIterator) Error() error { - return gi.parent.Error() -} - -// consumeSeekGas consumes on each iteration step a flat gas cost and a variable gas cost -// based on the current value's length. 
-func (gi *gasIterator) consumeSeekGas() { - if gi.Valid() { - key := gi.Key() - value := gi.Value() - - gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc) - gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc) - } - gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc) -} diff --git a/store/gaskv/store_test.go b/store/gaskv/store_test.go deleted file mode 100644 index 354832d17c..0000000000 --- a/store/gaskv/store_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package gaskv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/gaskv" - "cosmossdk.io/store/types" -) - -func bz(s string) []byte { return []byte(s) } - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -func TestGasKVStoreBasic(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(10000) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - - require.Equal(t, types.StoreTypeDB, st.GetStoreType()) - require.Panics(t, func() { st.CacheWrap() }) - require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) }) - - require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - st.Set(keyFmt(1), valFmt(1)) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - st.Delete(keyFmt(1)) - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - require.Equal(t, meter.GasConsumed(), types.Gas(6858)) -} - -func TestGasKVStoreIterator(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(100000) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - require.False(t, st.Has(keyFmt(1))) - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - require.Empty(t, st.Get(keyFmt(2)), "Expected `key2` to be empty") - require.Empty(t, st.Get(keyFmt(3)), "Expected `key3` to be empty") - - st.Set(keyFmt(1), valFmt(1)) - require.True(t, st.Has(keyFmt(1))) - st.Set(keyFmt(2), valFmt(2)) - require.True(t, st.Has(keyFmt(2))) - st.Set(keyFmt(3), valFmt(0)) - - iterator := st.Iterator(nil, nil) - start, end := iterator.Domain() - require.Nil(t, start) - require.Nil(t, end) - require.NoError(t, iterator.Error()) - - t.Cleanup(func() { - if err := iterator.Close(); err != nil { - t.Fatal(err) - } - }) - ka := iterator.Key() - require.Equal(t, ka, keyFmt(1)) - va := iterator.Value() - require.Equal(t, va, valFmt(1)) - iterator.Next() - kb := iterator.Key() - require.Equal(t, kb, keyFmt(2)) - vb := iterator.Value() - require.Equal(t, vb, valFmt(2)) - iterator.Next() - require.Equal(t, types.Gas(14565), meter.GasConsumed()) - kc := iterator.Key() - require.Equal(t, kc, keyFmt(3)) - vc := iterator.Value() - require.Equal(t, vc, valFmt(0)) - iterator.Next() - require.Equal(t, types.Gas(14667), meter.GasConsumed()) - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.Equal(t, types.Gas(14697), meter.GasConsumed()) - require.NoError(t, iterator.Error()) - - reverseIterator := st.ReverseIterator(nil, nil) - t.Cleanup(func() { - if err := reverseIterator.Close(); err != nil { - t.Fatal(err) - } - }) - 
require.Equal(t, reverseIterator.Key(), keyFmt(3)) - reverseIterator.Next() - require.Equal(t, reverseIterator.Key(), keyFmt(2)) - reverseIterator.Next() - require.Equal(t, reverseIterator.Key(), keyFmt(1)) - reverseIterator.Next() - require.False(t, reverseIterator.Valid()) - require.Panics(t, reverseIterator.Next) - require.Equal(t, types.Gas(15135), meter.GasConsumed()) -} - -func TestGasKVStoreOutOfGasSet(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(0) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - require.Panics(t, func() { st.Set(keyFmt(1), valFmt(1)) }, "Expected out-of-gas") -} - -func TestGasKVStoreOutOfGasIterator(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(20000) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - st.Set(keyFmt(1), valFmt(1)) - iterator := st.Iterator(nil, nil) - iterator.Next() - require.Panics(t, func() { iterator.Value() }, "Expected out-of-gas") -} diff --git a/store/go.mod b/store/go.mod index e4fdbeac14..767c876c5e 100644 --- a/store/go.mod +++ b/store/go.mod @@ -1,79 +1,83 @@ -module cosmossdk.io/store +module cosmossdk.io/store/v2 -go 1.20 +go 1.21 require ( cosmossdk.io/errors v1.0.0 cosmossdk.io/log v1.2.1 cosmossdk.io/math v1.1.3-rc.1 + github.com/cockroachdb/errors v1.11.1 + github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c github.com/cometbft/cometbft v0.38.0 github.com/cosmos/cosmos-db v1.0.0 github.com/cosmos/gogoproto v1.4.11 github.com/cosmos/iavl v1.0.0-rc.1 github.com/cosmos/ics23/go v0.10.0 - github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 // indirect - github.com/hashicorp/go-hclog v1.5.0 - github.com/hashicorp/go-plugin v1.5.2 - github.com/hashicorp/golang-lru v1.0.2 - github.com/spf13/cast v1.5.1 // indirect + github.com/linxGnu/grocksdb v1.8.4 github.com/stretchr/testify v1.8.4 github.com/tidwall/btree v1.7.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - google.golang.org/grpc v1.59.0 - google.golang.org/protobuf v1.31.0 - gotest.tools/v3 v3.5.1 + modernc.org/sqlite v1.25.0 ) -require github.com/hashicorp/go-metrics v0.5.1 - require ( - github.com/DataDog/zstd v1.5.5 // indirect + github.com/DataDog/zstd v1.4.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b // indirect github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/dot v1.4.2 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/hashicorp/go-immutable-radix v1.0.0 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - 
github.com/jhump/protoreflect v1.15.3 // indirect - github.com/klauspost/compress v1.16.5 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.16.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.7.16 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect - github.com/rs/zerolog v1.31.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rs/zerolog v1.30.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.1 // indirect + lukechampine.com/uint128 v1.2.0 // indirect + modernc.org/cc/v3 v3.40.0 // indirect + modernc.org/ccgo/v3 v3.16.13 // indirect + modernc.org/libc v1.24.1 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.6.0 // indirect + modernc.org/opt v0.1.3 // indirect + modernc.org/strutil v1.1.3 // indirect + modernc.org/token v1.0.1 // indirect ) diff --git a/store/go.sum b/store/go.sum index b84306dbde..21414054f9 100644 --- a/store/go.sum +++ b/store/go.sum @@ -4,39 +4,30 @@ cosmossdk.io/log v1.2.1 h1:Xc1GgTCicniwmMiKwDxUjO4eLhPxoVdI9vtMW8Ti/uk= cosmossdk.io/log v1.2.1/go.mod h1:GNSCc/6+DhFIj1aLn/j7Id7PaO8DzNylUZoOYBL9+I4= cosmossdk.io/math 
v1.1.3-rc.1 h1:NebCNWDqb1MJRNfvxr4YY7d8FSYgkuB3L75K6xvM+Zo= cosmossdk.io/math v1.1.3-rc.1/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= +github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod 
h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b h1:LCs8gDhg6vt8A3dN7AEJxmCoETZ4qkySoVJVm3rcSJk= -github.com/cockroachdb/pebble v0.0.0-20230525220056-bb4fc9527b3b/go.mod h1:TkdVsGYRqtULUppt2RbC+YaKtTHnHoWa2apfFrSKABw= +github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c h1:aDetJlMe4qJxWAwu+/bzTs2/b1EW9ecVyawpRD7N/tE= +github.com/cockroachdb/pebble v0.0.0-20230819001538-1798fbf5956c/go.mod h1:EDjiaAXc0FXiRmxDzcu1wIEJ093ohHMUWxrI6iku0XA= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/cometbft/cometbft v0.38.0 h1:ogKnpiPX7gxCvqTEF4ly25/wAxUqf181t30P3vqdpdc= github.com/cometbft/cometbft v0.38.0/go.mod h1:5Jz0Z8YsHSf0ZaAqGvi/ifioSdVFPtEGrm8Y9T/993k= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -49,40 +40,33 @@ github.com/cosmos/iavl v1.0.0-rc.1/go.mod h1:CmTGqMnRnucjxbjduneZXT+0vPgNElYvdef github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/dot v1.4.2 h1:UbK6gX4yvrpHKlxuUQicwoAis4zl8Dzwit9SnbBAXWw= github.com/emicklei/dot v1.4.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod 
h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= -github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -90,7 +74,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= @@ -101,235 +84,145 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.1 h1:rfPwUqFU6uZXNvGl4hzjY8LEBsqFVU4si1H9/Hqck/U= -github.com/hashicorp/go-metrics v0.5.1/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10 h1:CqYfpuYIjnlNxM3msdyPRKabhXZWbKjf3Q8BWROFBso= +github.com/google/pprof v0.0.0-20230228050547-1710fef4ab10/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.7.16 h1:Q2co1xrpdkr5Hx3Fp+f+f7fRGhQFQhvi/+226dtLmA8= -github.com/linxGnu/grocksdb v1.7.16/go.mod h1:JkS7pl5qWpGpuVb3bPqTz8nC12X3YtPZT+Xq7+QfQo4= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I= github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d h1:htwtWgtQo8YS6JFWWi2DNgY0RwSGJ1ruMoxY6CUUclk= -github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= 
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= -github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= +github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto 
v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/sys v0.11.0 
h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -342,22 +235,43 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= +modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.6.0 h1:i6mzavxrE9a30whzMfwf7XWVODx2r5OYXvU46cirX7o= +modernc.org/memory v1.6.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.25.0 h1:AFweiwPNd/b3BoKnBOfFm+Y260guGMF+0UFk0savqeA= +modernc.org/sqlite v1.25.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU= +modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= +modernc.org/token v1.0.1 
h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= diff --git a/store/iavl/store.go b/store/iavl/store.go deleted file mode 100644 index 7734066269..0000000000 --- a/store/iavl/store.go +++ /dev/null @@ -1,416 +0,0 @@ -package iavl - -import ( - "errors" - "fmt" - "io" - - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - ics23 "github.com/cosmos/ics23/go" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -const ( - DefaultIAVLCacheSize = 500000 -) - -var ( - _ types.KVStore = (*Store)(nil) - _ types.CommitStore = (*Store)(nil) - _ types.CommitKVStore = (*Store)(nil) - _ types.Queryable = (*Store)(nil) - _ types.StoreWithInitialVersion = (*Store)(nil) -) - -// Store Implements types.KVStore and CommitKVStore. -type Store struct { - tree Tree - logger log.Logger - metrics metrics.StoreMetrics -} - -// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the -// store's version (id) from the provided DB. An error is returned if the version -// fails to load, or if called with a positive version on an empty tree. -func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { - return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics) -} - -// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion -// to the one given. Internally, it will load the store's version (id) from the -// provided DB. An error is returned if the version fails to load, or if called with a positive -// version on an empty tree. -func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { - tree := iavl.NewMutableTree(db, cacheSize, disableFastNode, logger, iavl.InitialVersionOption(initialVersion)) - - isUpgradeable, err := tree.IsUpgradeable() - if err != nil { - return nil, err - } - - if isUpgradeable && logger != nil { - logger.Info( - "Upgrading IAVL storage for faster queries + execution on live state. This may take a while", - "store_key", key.String(), - "version", initialVersion, - "commit", fmt.Sprintf("%X", id), - ) - } - - _, err = tree.LoadVersion(id.Version) - if err != nil { - return nil, err - } - - if logger != nil { - logger.Debug("Finished loading IAVL tree") - } - - return &Store{ - tree: tree, - logger: logger, - metrics: metrics, - }, nil -} - -// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable -// IAVL tree reference. It should only be used for testing purposes. -// -// CONTRACT: The IAVL tree should be fully loaded. 
-// CONTRACT: PruningOptions passed in as argument must be the same as pruning options -// passed into iavl.MutableTree -func UnsafeNewStore(tree *iavl.MutableTree) *Store { - return &Store{ - tree: tree, - metrics: metrics.NewNoOpMetrics(), - } -} - -// GetImmutable returns a reference to a new store backed by an immutable IAVL -// tree at a specific version (height) without any pruning options. This should -// be used for querying and iteration only. If the version does not exist or has -// been pruned, an empty immutable IAVL tree will be used. -// Any mutable operations executed will result in a panic. -func (st *Store) GetImmutable(version int64) (*Store, error) { - if !st.VersionExists(version) { - return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height") - } - - iTree, err := st.tree.GetImmutable(version) - if err != nil { - return nil, err - } - - return &Store{ - tree: &immutableTree{iTree}, - metrics: st.metrics, - }, nil -} - -// Commit commits the current store state and returns a CommitID with the new -// version and hash. -func (st *Store) Commit() types.CommitID { - defer st.metrics.MeasureSince("store", "iavl", "commit") - - hash, version, err := st.tree.SaveVersion() - if err != nil { - panic(err) - } - - return types.CommitID{ - Version: version, - Hash: hash, - } -} - -// WorkingHash returns the hash of the current working tree. -func (st *Store) WorkingHash() []byte { - return st.tree.WorkingHash() -} - -// LastCommitID implements Committer. -func (st *Store) LastCommitID() types.CommitID { - return types.CommitID{ - Version: st.tree.Version(), - Hash: st.tree.Hash(), - } -} - -// SetPruning panics as pruning options should be provided at initialization -// since IAVL accepts pruning options directly. -func (st *Store) SetPruning(_ pruningtypes.PruningOptions) { - panic("cannot set pruning options on an initialized IAVL store") -} - -// GetPruning panics as pruning options should be provided at initialization -// since IAVL accepts pruning options directly. -func (st *Store) GetPruning() pruningtypes.PruningOptions { - panic("cannot get pruning options on an initialized IAVL store") -} - -// VersionExists returns whether or not a given version is stored. -func (st *Store) VersionExists(version int64) bool { - return st.tree.VersionExists(version) -} - -// GetAllVersions returns all versions in the iavl tree -func (st *Store) GetAllVersions() []int { - return st.tree.AvailableVersions() -} - -// Implements Store. -func (st *Store) GetStoreType() types.StoreType { - return types.StoreTypeIAVL -} - -// Implements Store. -func (st *Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(st) -} - -// CacheWrapWithTrace implements the Store interface. -func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(st, w, tc)) -} - -// Implements types.KVStore. -func (st *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - _, err := st.tree.Set(key, value) - if err != nil && st.logger != nil { - st.logger.Error("iavl set error", "error", err.Error()) - } -} - -// Implements types.KVStore. -func (st *Store) Get(key []byte) []byte { - defer st.metrics.MeasureSince("store", "iavl", "get") - value, err := st.tree.Get(key) - if err != nil { - panic(err) - } - return value -} - -// Implements types.KVStore.
-func (st *Store) Has(key []byte) (exists bool) { - defer st.metrics.MeasureSince("store", "iavl", "has") - has, err := st.tree.Has(key) - if err != nil { - panic(err) - } - return has -} - -// Implements types.KVStore. -func (st *Store) Delete(key []byte) { - defer st.metrics.MeasureSince("store", "iavl", "delete") - _, _, err := st.tree.Remove(key) - if err != nil { - panic(err) - } -} - -// DeleteVersionsTo deletes versions up to the given version from the MutableTree. An error -// is returned if any single version is invalid or the delete fails. All writes -// happen in a single batch with a single commit. -func (st *Store) DeleteVersionsTo(version int64) error { - return st.tree.DeleteVersionsTo(version) -} - -// LoadVersionForOverwriting attempts to load a tree at a previously committed -// version. Any versions greater than targetVersion will be deleted. -func (st *Store) LoadVersionForOverwriting(targetVersion int64) error { - return st.tree.LoadVersionForOverwriting(targetVersion) -} - -// Implements types.KVStore. -func (st *Store) Iterator(start, end []byte) types.Iterator { - iterator, err := st.tree.Iterator(start, end, true) - if err != nil { - panic(err) - } - return iterator -} - -// Implements types.KVStore. -func (st *Store) ReverseIterator(start, end []byte) types.Iterator { - iterator, err := st.tree.Iterator(start, end, false) - if err != nil { - panic(err) - } - return iterator -} - -// SetInitialVersion sets the initial version of the IAVL tree. It is used when -// starting a new chain at an arbitrary height. -func (st *Store) SetInitialVersion(version int64) { - st.tree.SetInitialVersion(uint64(version)) -} - -// Exports the IAVL store at the given version, returning an iavl.Exporter for the tree. -func (st *Store) Export(version int64) (*iavl.Exporter, error) { - istore, err := st.GetImmutable(version) - if err != nil { - return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version) - } - tree, ok := istore.tree.(*immutableTree) - if !ok || tree == nil { - return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version) - } - return tree.Export() -} - -// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing.
-func (st *Store) Import(version int64) (*iavl.Importer, error) { - tree, ok := st.tree.(*iavl.MutableTree) - if !ok { - return nil, errors.New("iavl import failed: unable to find mutable tree") - } - return tree.Import(version) -} - -// Handle getting the latest height, if height is 0 -func getHeight(tree Tree, req *types.RequestQuery) int64 { - height := req.Height - if height == 0 { - latest := tree.Version() - if tree.VersionExists(latest - 1) { - height = latest - 1 - } else { - height = latest - } - } - return height -} - -// Query implements ABCI interface, allows queries -// -// by default we will return from (latest height -1), -// as we will have merkle proofs immediately (header height = data height + 1) -// If latest-1 is not present, use latest (which must be present) -// if you care to have the latest data to see a tx's results, you must -// explicitly set the height you want to see -func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) { - defer st.metrics.MeasureSince("store", "iavl", "query") - - if len(req.Data) == 0 { - return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length") - } - - tree := st.tree - - // store the height we chose in the response, with 0 being changed to the - // latest height - res = &types.ResponseQuery{ - Height: getHeight(tree, req), - } - - switch req.Path { - case "/key": // get by key - key := req.Data // data holds the key bytes - - res.Key = key - if !st.VersionExists(res.Height) { - res.Log = iavl.ErrVersionDoesNotExist.Error() - break - } - - value, err := tree.GetVersioned(key, res.Height) - if err != nil { - panic(err) - } - res.Value = value - - if !req.Prove { - break - } - - // Continue to prove existence/absence of value - // Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof - iTree, err := tree.GetImmutable(res.Height) - if err != nil { - // sanity check: If value for given version was retrieved, immutable tree must also be retrievable - panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error())) - } - mtree := &iavl.MutableTree{ - ImmutableTree: iTree, - } - - // get proof from tree and convert to merkle.Proof before adding to result - res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil) - - case "/subspace": - pairs := kv.Pairs{ - Pairs: make([]kv.Pair, 0), - } - - subspace := req.Data - res.Key = subspace - - iterator := types.KVStorePrefixIterator(st, subspace) - for ; iterator.Valid(); iterator.Next() { - pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()}) - } - if err := iterator.Close(); err != nil { - panic(fmt.Errorf("failed to close iterator: %w", err)) - } - - bz, err := pairs.Marshal() - if err != nil { - panic(fmt.Errorf("failed to marshal KV pairs: %w", err)) - } - - res.Value = bz - - default: - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path) - } - - return res, err -} - -// TraverseStateChanges traverses the state changes between two versions and calls the given function. -func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error { - return st.tree.TraverseStateChanges(startVersion, endVersion, fn) -} - -// Takes a MutableTree, a key, and a flag for creating existence or absence proof and returns the -// appropriate merkle.Proof.
Since this must be called after querying for the value, this function should never error -// Thus, it will panic on error rather than returning it -func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps { - var ( - commitmentProof *ics23.CommitmentProof - err error - ) - - if exists { - // value was found - commitmentProof, err = tree.GetMembershipProof(key) - if err != nil { - // sanity check: If value was found, membership proof must be creatable - panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error())) - } - } else { - // value wasn't found - commitmentProof, err = tree.GetNonMembershipProof(key) - if err != nil { - // sanity check: If value wasn't found, nonmembership proof must be creatable - panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error())) - } - } - - op := types.NewIavlCommitmentOp(key, commitmentProof) - return &cmtprotocrypto.ProofOps{Ops: []cmtprotocrypto.ProofOp{op.ProofOp()}} -} diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go deleted file mode 100644 index 9d54f8f82b..0000000000 --- a/store/iavl/store_test.go +++ /dev/null @@ -1,713 +0,0 @@ -package iavl - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "math" - "sort" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -var ( - cacheSize = 100 - treeData = map[string]string{ - "hello": "goodbye", - "aloha": "shalom", - } - nMoreData = 0 -) - -func randBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, _ = crand.Read(b) - return b -} - -// make a tree with data from above and save it -func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) { - t.Helper() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - for k, v := range treeData { - _, err := tree.Set([]byte(k), []byte(v)) - require.NoError(t, err) - } - - for i := 0; i < nMoreData; i++ { - key := randBytes(12) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - - hash, ver, err := tree.SaveVersion() - require.Nil(t, err) - - return tree, types.CommitID{Version: ver, Hash: hash} -} - -func TestLoadStore(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - // Create non-pruned height H - updated, err := tree.Set([]byte("hello"), []byte("hallo")) - require.NoError(t, err) - require.True(t, updated) - hash, verH, err := tree.SaveVersion() - cIDH := types.CommitID{Version: verH, Hash: hash} - require.Nil(t, err) - - // Create pruned height Hp - updated, err = tree.Set([]byte("hello"), []byte("hola")) - require.NoError(t, err) - require.True(t, updated) - hash, verHp, err := tree.SaveVersion() - cIDHp := types.CommitID{Version: verHp, Hash: hash} - require.Nil(t, err) - - // TODO: Prune this height - - // Create current height Hc - updated, err = tree.Set([]byte("hello"), []byte("ciao")) - require.NoError(t, err) - require.True(t, updated) - hash, verHc, err := tree.SaveVersion() - cIDHc := types.CommitID{Version: verHc, Hash: hash} - require.Nil(t, err) - - // Querying an existing store at some previous non-pruned height H - hStore, err := store.GetImmutable(verH) - require.NoError(t, err) - require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo") - - // Querying an existing store at some previous 
pruned height Hp - hpStore, err := store.GetImmutable(verHp) - require.NoError(t, err) - require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola") - - // Querying an existing store at current height Hc - hcStore, err := store.GetImmutable(verHc) - require.NoError(t, err) - require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao") - - // Querying a new store at some previous non-pruned height H - newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo") - - // Querying a new store at some previous pruned height Hp - newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola") - - // Querying a new store at current height H - newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao") -} - -func TestGetImmutable(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - updated, err := tree.Set([]byte("hello"), []byte("adios")) - require.NoError(t, err) - require.True(t, updated) - hash, ver, err := tree.SaveVersion() - cID := types.CommitID{Version: ver, Hash: hash} - require.Nil(t, err) - - _, err = store.GetImmutable(cID.Version + 1) - require.Error(t, err) - - newStore, err := store.GetImmutable(cID.Version - 1) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye")) - - newStore, err = store.GetImmutable(cID.Version) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("adios")) - - res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true}) - require.NoError(t, err) - require.Equal(t, res.Value, []byte("adios")) - require.NotNil(t, res.ProofOps) - - require.Panics(t, func() { newStore.Set(nil, nil) }) - require.Panics(t, func() { newStore.Delete(nil) }) - require.Panics(t, func() { newStore.Commit() }) -} - -func TestTestGetImmutableIterator(t *testing.T) { - db := dbm.NewMemDB() - tree, cID := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - newStore, err := store.GetImmutable(cID.Version) - require.NoError(t, err) - - iter := newStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - - require.Equal(t, len(expected), i) -} - -func TestIAVLStoreGetSetHasDelete(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - key := "hello" - - exists := iavlStore.Has([]byte(key)) - require.True(t, exists) - - value := iavlStore.Get([]byte(key)) - require.EqualValues(t, value, treeData[key]) - - value2 := "notgoodbye" - iavlStore.Set([]byte(key), []byte(value2)) - - value = iavlStore.Get([]byte(key)) - require.EqualValues(t, value, value2) - - iavlStore.Delete([]byte(key)) - - exists = iavlStore.Has([]byte(key)) - require.False(t, exists) -} - -func 
TestIAVLStoreNoNilSet(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic") -} - -func TestIAVLIterator(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), []byte("rocks")) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("golang")) - expected = []string{"aloha"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("shalom")) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, nil) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), nil) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) -} - -func TestIAVLReverseIterator(t *testing.T) { - db := dbm.NewMemDB() - - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte{0x00}, []byte("0")) - iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0")) - iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1")) - iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2")) - iavlStore.Set([]byte{0x01}, []byte("1")) - - testReverseIterator := func(t *testing.T, start, end []byte, expected []string) { - t.Helper() - iter := iavlStore.ReverseIterator(start, end) - var i int - for i = 0; iter.Valid(); iter.Next() { - expectedValue := expected[i] - value := iter.Value() - require.EqualValues(t, string(value), expectedValue) - i++ - } - require.Equal(t, len(expected), i) - } - - testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - 
testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}) - testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) -} - -func TestIAVLPrefixIterator(t *testing.T) { - db := dbm.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStorePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test1", "test2", "test3"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(0)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(0)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) -} - -func TestIAVLReversePrefixIterator(t *testing.T) { - db := dbm.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test3", "test2", 
"test1"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - require.Equal(t, len(expected), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(255)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(255)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected), i) -} - -func nextVersion(iavl *Store) { - key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version)) - value := []byte(fmt.Sprintf("Value for tree: %d", iavl.LastCommitID().Version)) - iavl.Set(key, value) - iavl.Commit() -} - -func TestIAVLNoPrune(t *testing.T) { - db := dbm.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - nextVersion(iavlStore) - - for i := 1; i < 100; i++ { - for j := 1; j <= i; j++ { - require.True(t, iavlStore.VersionExists(int64(j)), - "Missing version %d with latest version %d. 
Should be storing all versions", - j, i) - } - - nextVersion(iavlStore) - } -} - -func TestIAVLStoreQuery(t *testing.T) { - db := dbm.NewMemDB() - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - k1, v1 := []byte("key1"), []byte("val1") - k2, v2 := []byte("key2"), []byte("val2") - v3 := []byte("val3") - - ksub := []byte("key") - KVs0 := kv.Pairs{} - KVs1 := kv.Pairs{ - Pairs: []kv.Pair{ - {Key: k1, Value: v1}, - {Key: k2, Value: v2}, - }, - } - KVs2 := kv.Pairs{ - Pairs: []kv.Pair{ - {Key: k1, Value: v3}, - {Key: k2, Value: v2}, - }, - } - - valExpSubEmpty, err := KVs0.Marshal() - require.NoError(t, err) - - valExpSub1, err := KVs1.Marshal() - require.NoError(t, err) - - valExpSub2, err := KVs2.Marshal() - require.NoError(t, err) - - cid := iavlStore.Commit() - ver := cid.Version - query := types.RequestQuery{Path: "/key", Data: k1, Height: ver} - querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver} - - // query subspace before anything set - qres, err := iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSubEmpty, qres.Value) - - // set data - iavlStore.Set(k1, v1) - iavlStore.Set(k2, v2) - - // set data without commit, doesn't show up - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // commit it, but still don't see on old version - cid = iavlStore.Commit() - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // but yes on the new version - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSub1, qres.Value) - - // modify - iavlStore.Set(k1, v3) - cid = iavlStore.Commit() - - // query will return old values, as height is fixed - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // update to latest in the query and we are happy - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v3, qres.Value) - query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version} - - qres, err = iavlStore.Query(&query2) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v2, qres.Value) - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSub2, qres.Value) - - // default (height 0) will show latest -1 - query0 := types.RequestQuery{Path: "/key", Data: k1} - qres, err = iavlStore.Query(&query0) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) -} - -func BenchmarkIAVLIteratorNext(b *testing.B) { - b.ReportAllocs() - db := dbm.NewMemDB() - treeSize := 1000 - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - for i := 0; i < treeSize; i++ { - key := randBytes(4) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(b, err) - } - - iavlStore := UnsafeNewStore(tree) - iterators := 
make([]types.Iterator, b.N/treeSize) - - for i := 0; i < len(iterators); i++ { - iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255}) - } - - b.ResetTimer() - for i := 0; i < len(iterators); i++ { - iter := iterators[i] - for j := 0; j < treeSize; j++ { - iter.Next() - } - } -} - -func TestSetInitialVersion(t *testing.T) { - testCases := []struct { - name string - storeFn func(db *dbm.MemDB) *Store - expPanic bool - }{ - { - "works with a mutable tree", - func(db *dbm.MemDB) *Store { - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - - return store - }, false, - }, - { - "throws error on immutable tree", - func(db *dbm.MemDB) *Store { - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - _, version, err := store.tree.SaveVersion() - require.NoError(t, err) - require.Equal(t, int64(1), version) - store, err = store.GetImmutable(1) - require.NoError(t, err) - - return store - }, true, - }, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - db := dbm.NewMemDB() - store := tc.storeFn(db) - - if tc.expPanic { - require.Panics(t, func() { store.SetInitialVersion(5) }) - } else { - store.SetInitialVersion(5) - cid := store.Commit() - require.Equal(t, int64(5), cid.GetVersion()) - } - }) - } -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} - -func TestChangeSets(t *testing.T) { - db := dbm.NewMemDB() - treeSize := 1000 - treeVersion := int64(10) - targetVersion := int64(6) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt)) - - for j := int64(0); j < treeVersion; j++ { - keys := [][]byte{} - for i := 0; i < treeSize; i++ { - keys = append(keys, randBytes(4)) - } - sort.Slice(keys, func(p, q int) bool { - return bytes.Compare(keys[p], keys[q]) < 0 - }) - for i := 0; i < treeSize; i++ { - key := keys[i] - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - _, _, err := tree.SaveVersion() - require.NoError(t, err) - } - - changeSets := []*iavl.ChangeSet{} - iavlStore := UnsafeNewStore(tree) - commitID := iavlStore.LastCommitID() - - require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error { - changeSets = append(changeSets, cs) - return nil - })) - require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion)) - - for i, cs := range changeSets { - v, err := tree.SaveChangeSet(cs) - require.NoError(t, err) - require.Equal(t, v, targetVersion+int64(i+1)) - } - - restoreCommitID := iavlStore.LastCommitID() - require.Equal(t, commitID, restoreCommitID) -} diff --git a/store/iavl/tree.go b/store/iavl/tree.go deleted file mode 100644 index a44559784a..0000000000 --- a/store/iavl/tree.go +++ /dev/null @@ -1,99 +0,0 @@ -package iavl - -import ( - "fmt" - - "github.com/cosmos/iavl" - - "cosmossdk.io/store/types" -) - -var ( - _ Tree = (*immutableTree)(nil) - _ Tree = (*iavl.MutableTree)(nil) -) - -type ( - // Tree defines an interface that both mutable and immutable IAVL trees - // must implement. 
For mutable IAVL trees, the interface is directly - // implemented by an iavl.MutableTree. For an immutable IAVL tree, a wrapper - // must be made. - Tree interface { - Has(key []byte) (bool, error) - Get(key []byte) ([]byte, error) - Set(key, value []byte) (bool, error) - Remove(key []byte) ([]byte, bool, error) - SaveVersion() ([]byte, int64, error) - Version() int64 - Hash() []byte - WorkingHash() []byte - VersionExists(version int64) bool - DeleteVersionsTo(version int64) error - GetVersioned(key []byte, version int64) ([]byte, error) - GetImmutable(version int64) (*iavl.ImmutableTree, error) - SetInitialVersion(version uint64) - Iterator(start, end []byte, ascending bool) (types.Iterator, error) - AvailableVersions() []int - LoadVersionForOverwriting(targetVersion int64) error - TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error - } - - // immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree - // that implements the Tree interface. It should only be used for querying - // and iteration, specifically at previous heights. - immutableTree struct { - *iavl.ImmutableTree - } -) - -func (it *immutableTree) Set(_, _ []byte) (bool, error) { - panic("cannot call 'Set' on an immutable IAVL tree") -} - -func (it *immutableTree) Remove(_ []byte) ([]byte, bool, error) { - panic("cannot call 'Remove' on an immutable IAVL tree") -} - -func (it *immutableTree) SaveVersion() ([]byte, int64, error) { - panic("cannot call 'SaveVersion' on an immutable IAVL tree") -} - -func (it *immutableTree) DeleteVersionsTo(_ int64) error { - panic("cannot call 'DeleteVersionsTo' on an immutable IAVL tree") -} - -func (it *immutableTree) SetInitialVersion(_ uint64) { - panic("cannot call 'SetInitialVersion' on an immutable IAVL tree") -} - -func (it *immutableTree) VersionExists(version int64) bool { - return it.Version() == version -} - -func (it *immutableTree) GetVersioned(key []byte, version int64) ([]byte, error) { - if it.Version() != version { - return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) - } - - return it.Get(key) -} - -func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error) { - if it.Version() != version { - return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) - } - - return it.ImmutableTree, nil -} - -func (it *immutableTree) AvailableVersions() []int { - return []int{} -} - -func (it *immutableTree) LoadVersionForOverwriting(targetVersion int64) error { - panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree") -} - -func (it *immutableTree) WorkingHash() []byte { - panic("cannot call 'WorkingHash' on an immutable IAVL tree") -} diff --git a/store/iavl/tree_test.go b/store/iavl/tree_test.go deleted file mode 100644 index 63a12a2e5d..0000000000 --- a/store/iavl/tree_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package iavl - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" -) - -func TestImmutableTreePanics(t *testing.T) { - t.Parallel() - immTree := iavl.NewImmutableTree(dbm.NewMemDB(), 100, false, log.NewNopLogger()) - it := &immutableTree{immTree} - require.Panics(t, func() { - _, err := it.Set([]byte{}, []byte{}) - require.NoError(t, err) - }) - require.Panics(t, func() { - _, _, err := it.Remove([]byte{}) - require.NoError(t, err) - }) - 
require.Panics(t, func() { _, _, _ = it.SaveVersion() }) - require.Panics(t, func() { _ = it.DeleteVersionsTo(int64(1)) }) - - val, err := it.GetVersioned(nil, 1) - require.Error(t, err) - require.Nil(t, val) - - imm, err := it.GetImmutable(1) - require.Error(t, err) - require.Nil(t, imm) - - imm, err = it.GetImmutable(0) - require.NoError(t, err) - require.NotNil(t, imm) - require.Equal(t, immTree, imm) -} diff --git a/store/internal/conv/doc.go b/store/internal/conv/doc.go deleted file mode 100644 index 1c86f5c144..0000000000 --- a/store/internal/conv/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package conv provides internal functions for convertions and data manipulation -package conv diff --git a/store/internal/conv/string.go b/store/internal/conv/string.go deleted file mode 100644 index 96d89c3a5f..0000000000 --- a/store/internal/conv/string.go +++ /dev/null @@ -1,19 +0,0 @@ -package conv - -import ( - "unsafe" -) - -// UnsafeStrToBytes uses unsafe to convert string into byte array. Returned bytes -// must not be altered after this function is called as it will cause a segmentation fault. -func UnsafeStrToBytes(s string) []byte { - return unsafe.Slice(unsafe.StringData(s), len(s)) // ref https://github.com/golang/go/issues/53003#issuecomment-1140276077 -} - -// UnsafeBytesToStr is meant to make a zero allocation conversion -// from []byte -> string to speed up operations, it is not meant -// to be used generally, but for a specific pattern to delete keys -// from a map. -func UnsafeBytesToStr(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git a/store/internal/conv/string_test.go b/store/internal/conv/string_test.go deleted file mode 100644 index 3a14517531..0000000000 --- a/store/internal/conv/string_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package conv - -import ( - "runtime" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/suite" -) - -func TestStringSuite(t *testing.T) { - suite.Run(t, new(StringSuite)) -} - -type StringSuite struct{ suite.Suite } - -func unsafeConvertStr() []byte { - return UnsafeStrToBytes("abc") -} - -func (s *StringSuite) TestUnsafeStrToBytes() { - // we convert in other function to trigger GC. We want to check that - // the underlying array in []bytes is accessible after GC will finish swapping. - for i := 0; i < 5; i++ { - b := unsafeConvertStr() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - b2 := append(b, 'd') - s.Equal("abc", string(b)) - s.Equal("abcd", string(b2)) - } -} - -func unsafeConvertBytes() string { - return UnsafeBytesToStr([]byte("abc")) -} - -func (s *StringSuite) TestUnsafeBytesToStr() { - // we convert in other function to trigger GC. We want to check that - // the underlying array in []bytes is accessible after GC will finish swapping. 
- for i := 0; i < 5; i++ { - str := unsafeConvertBytes() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - s.Equal("abc", str) - } -} - -func BenchmarkUnsafeStrToBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - UnsafeStrToBytes(strconv.Itoa(i)) - } -} diff --git a/store/internal/kv/kv.go b/store/internal/kv/kv.go index 1f3da91cc2..de3bf5ca01 100644 --- a/store/internal/kv/kv.go +++ b/store/internal/kv/kv.go @@ -5,6 +5,17 @@ import ( "sort" ) +type ( + Pair struct { + Key []byte + Value []byte + } + + Pairs struct { + Pairs []Pair + } +) + func (kvs Pairs) Len() int { return len(kvs.Pairs) } func (kvs Pairs) Less(i, j int) bool { switch bytes.Compare(kvs.Pairs[i].Key, kvs.Pairs[j].Key) { diff --git a/store/internal/kv/kv.pb.go b/store/internal/kv/kv.pb.go deleted file mode 100644 index 847bd11d44..0000000000 --- a/store/internal/kv/kv.pb.go +++ /dev/null @@ -1,559 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/internal/kv/v1beta1/kv.proto - -package kv - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Pairs defines a repeated slice of Pair objects. -type Pairs struct { - Pairs []Pair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs"` -} - -func (m *Pairs) Reset() { *m = Pairs{} } -func (m *Pairs) String() string { return proto.CompactTextString(m) } -func (*Pairs) ProtoMessage() {} -func (*Pairs) Descriptor() ([]byte, []int) { - return fileDescriptor_534782c4083e056d, []int{0} -} -func (m *Pairs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Pairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Pairs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Pairs) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pairs.Merge(m, src) -} -func (m *Pairs) XXX_Size() int { - return m.Size() -} -func (m *Pairs) XXX_DiscardUnknown() { - xxx_messageInfo_Pairs.DiscardUnknown(m) -} - -var xxx_messageInfo_Pairs proto.InternalMessageInfo - -func (m *Pairs) GetPairs() []Pair { - if m != nil { - return m.Pairs - } - return nil -} - -// Pair defines a key/value bytes tuple. 
-type Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Pair) Reset() { *m = Pair{} } -func (m *Pair) String() string { return proto.CompactTextString(m) } -func (*Pair) ProtoMessage() {} -func (*Pair) Descriptor() ([]byte, []int) { - return fileDescriptor_534782c4083e056d, []int{1} -} -func (m *Pair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Pair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Pair) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pair.Merge(m, src) -} -func (m *Pair) XXX_Size() int { - return m.Size() -} -func (m *Pair) XXX_DiscardUnknown() { - xxx_messageInfo_Pair.DiscardUnknown(m) -} - -var xxx_messageInfo_Pair proto.InternalMessageInfo - -func (m *Pair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Pair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Pairs)(nil), "cosmos.store.internal.kv.v1beta1.Pairs") - proto.RegisterType((*Pair)(nil), "cosmos.store.internal.kv.v1beta1.Pair") -} - -func init() { - proto.RegisterFile("cosmos/store/internal/kv/v1beta1/kv.proto", fileDescriptor_534782c4083e056d) -} - -var fileDescriptor_534782c4083e056d = []byte{ - // 217 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce, - 0xcd, 0x2f, 0xd6, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, - 0xcc, 0xd1, 0xcf, 0x2e, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0x2e, 0xd3, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x80, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0x83, 0x29, 0xd5, - 0xcb, 0x2e, 0xd3, 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd6, 0x07, 0xb1, - 0x20, 0xfa, 0x94, 0xbc, 0xb9, 0x58, 0x03, 0x12, 0x33, 0x8b, 0x8a, 0x85, 0x9c, 0xb8, 0x58, 0x0b, - 0x40, 0x0c, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x35, 0x3d, 0x42, 0x06, 0xea, 0x81, 0xf4, - 0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd1, 0xaa, 0xa4, 0xc7, 0xc5, 0x02, 0x12, 0x14, - 0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, - 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xc5, - 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, - 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x41, 0x6c, 0x2f, 0x4e, 0xc9, - 0xd6, 0xcb, 0xcc, 0xc7, 0xf4, 0x7f, 0x12, 0x1b, 0xd8, 0xf5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x5d, 0xad, 0x97, 0xdd, 0x22, 0x01, 0x00, 0x00, -} - -func (m *Pairs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pairs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pairs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pairs) > 0 { - for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Pair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - offset -= sovKv(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Pairs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Pairs) > 0 { - for _, e := range m.Pairs { - l = e.Size() - n += 1 + l + sovKv(uint64(l)) - } - } - return n -} - -func (m *Pair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - return n -} - -func sovKv(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Pairs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pairs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pairs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pairs = append(m.Pairs, Pair{}) - if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Pair) Unmarshal(dAtA []byte) error { - l 
:= len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthKv - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupKv - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthKv - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/internal/maps/maps.go b/store/internal/maps/maps.go index 2ee7d09b59..1d8d8f1a07 100644 --- a/store/internal/maps/maps.go +++ b/store/internal/maps/maps.go @@ -7,8 +7,8 @@ import ( "github.com/cometbft/cometbft/crypto/tmhash" cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/internal/tree" + "cosmossdk.io/store/v2/internal/kv" + "cosmossdk.io/store/v2/internal/tree" ) // merkleMap defines a merkle-ized tree from a map. 
Leave values are treated as diff --git a/store/internal/proofs/create.go b/store/internal/proofs/create.go index 55874d99cd..47e55e9785 100644 --- a/store/internal/proofs/create.go +++ b/store/internal/proofs/create.go @@ -6,7 +6,7 @@ import ( ics23 "github.com/cosmos/ics23/go" - sdkmaps "cosmossdk.io/store/internal/maps" + "cosmossdk.io/store/v2/internal/maps" ) var ( @@ -93,7 +93,7 @@ func createExistenceProof(data map[string][]byte, key []byte) (*ics23.ExistenceP return nil, errors.New("cannot make existence proof if key is not in map") } - _, proofs, _ := sdkmaps.ProofsFromMap(data) + _, proofs, _ := maps.ProofsFromMap(data) proof := proofs[string(key)] if proof == nil { return nil, errors.New("returned no proof for key") diff --git a/store/internal/proofs/helpers.go b/store/internal/proofs/helpers.go index 59c3bf0a9d..1a7d8a36aa 100644 --- a/store/internal/proofs/helpers.go +++ b/store/internal/proofs/helpers.go @@ -7,7 +7,7 @@ import ( "golang.org/x/exp/maps" "cosmossdk.io/math/unsafe" - sdkmaps "cosmossdk.io/store/internal/maps" + internalmaps "cosmossdk.io/store/v2/internal/maps" ) // SimpleResult contains a merkle.SimpleProof along with all data needed to build the confio/proof @@ -23,7 +23,7 @@ type SimpleResult struct { // returns a range proof and the root hash of the tree func GenerateRangeProof(size int, loc Where) *SimpleResult { data := BuildMap(size) - root, proofs, allkeys := sdkmaps.ProofsFromMap(data) + root, proofs, allkeys := internalmaps.ProofsFromMap(data) key := GetKey(allkeys, loc) proof := proofs[key] @@ -53,7 +53,7 @@ func SortedKeys(data map[string][]byte) []string { } func CalcRoot(data map[string][]byte) []byte { - root, _, _ := sdkmaps.ProofsFromMap(data) + root, _, _ := internalmaps.ProofsFromMap(data) return root } diff --git a/store/iterator.go b/store/iterator.go new file mode 100644 index 0000000000..fa49b715be --- /dev/null +++ b/store/iterator.go @@ -0,0 +1,38 @@ +package store + +// Iterator defines an interface for iterating over a domain of key/value pairs. +type Iterator interface { + // Domain returns the start (inclusive) and end (exclusive) limits of the iterator. + Domain() ([]byte, []byte) + + // Valid returns if the iterator is currently valid. + Valid() bool + + // Error returns any accumulated error. Error() should be called after all + // key/value pairs have been exhausted, i.e. after Next() has returned false. + Error() error + + // Key returns the key of the current key/value pair, or nil if done. + Key() []byte + + // Value returns the value of the current key/value pair, or nil if done. + Value() []byte + + // Next moves the iterator to the next key/value pair. + Next() bool + + // Close releases associated resources. It should NOT be idempotent. It must + // only be called once and any call after may panic. + Close() +} + +// IteratorCreator defines an interface for creating forward and reverse iterators. +type IteratorCreator interface { + // Iterator creates a new iterator for the given store name and domain, where + // domain is defined by [start, end). Note, both start and end are optional. + Iterator(storeKey string, start, end []byte) (Iterator, error) + // ReverseIterator creates a new reverse iterator for the given store name + // and domain, where domain is defined by [start, end). Note, both start and + // end are optional. 
+ ReverseIterator(storeKey string, start, end []byte) (Iterator, error) +} diff --git a/store/listenkv/store.go b/store/listenkv/store.go deleted file mode 100644 index b08a6e3950..0000000000 --- a/store/listenkv/store.go +++ /dev/null @@ -1,142 +0,0 @@ -package listenkv - -import ( - "io" - - "cosmossdk.io/store/types" -) - -var _ types.KVStore = &Store{} - -// Store implements the KVStore interface with listening enabled. -// Operations are traced on each core KVStore call and written to any of the -// underlying listeners with the proper key and operation permissions -type Store struct { - parent types.KVStore - listener *types.MemoryListener - parentStoreKey types.StoreKey -} - -// NewStore returns a reference to a new traceKVStore given a parent -// KVStore implementation and a buffered writer. -func NewStore(parent types.KVStore, parentStoreKey types.StoreKey, listener *types.MemoryListener) *Store { - return &Store{parent: parent, listener: listener, parentStoreKey: parentStoreKey} -} - -// Get implements the KVStore interface. It traces a read operation and -// delegates a Get call to the parent KVStore. -func (s *Store) Get(key []byte) []byte { - value := s.parent.Get(key) - return value -} - -// Set implements the KVStore interface. It traces a write operation and -// delegates the Set call to the parent KVStore. -func (s *Store) Set(key, value []byte) { - types.AssertValidKey(key) - s.parent.Set(key, value) - s.listener.OnWrite(s.parentStoreKey, key, value, false) -} - -// Delete implements the KVStore interface. It traces a write operation and -// delegates the Delete call to the parent KVStore. -func (s *Store) Delete(key []byte) { - s.parent.Delete(key) - s.listener.OnWrite(s.parentStoreKey, key, nil, true) -} - -// Has implements the KVStore interface. It delegates the Has call to the -// parent KVStore. -func (s *Store) Has(key []byte) bool { - return s.parent.Has(key) -} - -// Iterator implements the KVStore interface. It delegates the Iterator call -// the to the parent KVStore. -func (s *Store) Iterator(start, end []byte) types.Iterator { - return s.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It delegates the -// ReverseIterator call the to the parent KVStore. -func (s *Store) ReverseIterator(start, end []byte) types.Iterator { - return s.iterator(start, end, false) -} - -// iterator facilitates iteration over a KVStore. It delegates the necessary -// calls to it's parent KVStore. -func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator - - if ascending { - parent = s.parent.Iterator(start, end) - } else { - parent = s.parent.ReverseIterator(start, end) - } - - return newTraceIterator(parent, s.listener) -} - -type listenIterator struct { - parent types.Iterator - listener *types.MemoryListener -} - -func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) types.Iterator { - return &listenIterator{parent: parent, listener: listener} -} - -// Domain implements the Iterator interface. -func (li *listenIterator) Domain() (start, end []byte) { - return li.parent.Domain() -} - -// Valid implements the Iterator interface. -func (li *listenIterator) Valid() bool { - return li.parent.Valid() -} - -// Next implements the Iterator interface. -func (li *listenIterator) Next() { - li.parent.Next() -} - -// Key implements the Iterator interface. -func (li *listenIterator) Key() []byte { - key := li.parent.Key() - return key -} - -// Value implements the Iterator interface. 
-func (li *listenIterator) Value() []byte { - value := li.parent.Value() - return value -} - -// Close implements the Iterator interface. -func (li *listenIterator) Close() error { - return li.parent.Close() -} - -// Error delegates the Error call to the parent iterator. -func (li *listenIterator) Error() error { - return li.parent.Error() -} - -// GetStoreType implements the KVStore interface. It returns the underlying -// KVStore type. -func (s *Store) GetStoreType() types.StoreType { - return s.parent.GetStoreType() -} - -// CacheWrap implements the KVStore interface. It panics as a Store -// cannot be cache wrapped. -func (s *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a ListenKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be cache wrapped. -func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a ListenKVStore") -} diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go deleted file mode 100644 index 51b88912c2..0000000000 --- a/store/listenkv/store_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package listenkv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/listenkv" - "cosmossdk.io/store/prefix" - "cosmossdk.io/store/types" -) - -func bz(s string) []byte { return []byte(s) } - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -var kvPairs = []kv.Pair{ - {Key: keyFmt(1), Value: valFmt(1)}, - {Key: keyFmt(2), Value: valFmt(2)}, - {Key: keyFmt(3), Value: valFmt(3)}, -} - -var testStoreKey = types.NewKVStoreKey("listen_test") - -func newListenKVStore(listener *types.MemoryListener) *listenkv.Store { - store := newEmptyListenKVStore(listener) - - for _, kvPair := range kvPairs { - store.Set(kvPair.Key, kvPair.Value) - } - - return store -} - -func newEmptyListenKVStore(listener *types.MemoryListener) *listenkv.Store { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - - return listenkv.NewStore(memDB, testStoreKey, listener) -} - -func TestListenKVStoreGet(t *testing.T) { - testCases := []struct { - key []byte - expectedValue []byte - }{ - { - key: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - { - key: []byte("does-not-exist"), - expectedValue: nil, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - value := store.Get(tc.key) - - require.Equal(t, tc.expectedValue, value) - } -} - -func TestListenKVStoreSet(t *testing.T) { - testCases := []struct { - key []byte - value []byte - expectedOut *types.StoreKVPair - }{ - { - key: kvPairs[0].Key, - value: kvPairs[0].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[0].Key, - Value: kvPairs[0].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - { - key: kvPairs[1].Key, - value: kvPairs[1].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[1].Key, - Value: kvPairs[1].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - { - key: kvPairs[2].Key, - value: kvPairs[2].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[2].Key, - Value: kvPairs[2].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := 
newEmptyListenKVStore(listener) - store.Set(tc.key, tc.value) - storeKVPair := listener.PopStateCache()[0] - - require.Equal(t, tc.expectedOut, storeKVPair) - } - - listener := types.NewMemoryListener() - store := newEmptyListenKVStore(listener) - require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") -} - -func TestListenKVStoreDelete(t *testing.T) { - testCases := []struct { - key []byte - expectedOut *types.StoreKVPair - }{ - { - key: kvPairs[0].Key, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[0].Key, - Value: nil, - StoreKey: testStoreKey.Name(), - Delete: true, - }, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - store.Delete(tc.key) - cache := listener.PopStateCache() - require.NotEmpty(t, cache) - storeKVPair := cache[len(cache)-1] - - require.Equal(t, tc.expectedOut, storeKVPair) - } -} - -func TestListenKVStoreHas(t *testing.T) { - testCases := []struct { - key []byte - expected bool - }{ - { - key: kvPairs[0].Key, - expected: true, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - ok := store.Has(tc.key) - - require.Equal(t, tc.expected, ok) - } -} - -func TestTestListenKVStoreIterator(t *testing.T) { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - iterator := store.Iterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - }{ - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - }, - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - }, - } - - for _, tc := range testCases { - ka := iterator.Key() - require.Equal(t, tc.expectedKey, ka) - - va := iterator.Value() - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestTestListenKVStoreReverseIterator(t *testing.T) { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - iterator := store.ReverseIterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - }{ - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - }, - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - } - - for _, tc := range testCases { - ka := iterator.Key() - require.Equal(t, tc.expectedKey, ka) - - va := iterator.Value() - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestListenKVStorePrefix(t *testing.T) { - store := newEmptyListenKVStore(nil) - pStore := prefix.NewStore(store, []byte("listen_prefix")) - require.IsType(t, prefix.Store{}, pStore) -} - -func TestListenKVStoreGetStoreType(t *testing.T) { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - store := newEmptyListenKVStore(nil) - require.Equal(t, memDB.GetStoreType(), 
store.GetStoreType()) -} - -func TestListenKVStoreCacheWrap(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) -} diff --git a/store/mem/mem_test.go b/store/mem/mem_test.go deleted file mode 100644 index 6595b45dce..0000000000 --- a/store/mem/mem_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package mem_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/mem" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -func TestStore(t *testing.T) { - db := mem.NewStore() - require.Equal(t, types.StoreTypeMemory, db.GetStoreType()) - - key, value := []byte("key"), []byte("value") - - require.Nil(t, db.Get(key)) - db.Set(key, value) - require.Equal(t, value, db.Get(key)) - - newValue := []byte("newValue") - db.Set(key, newValue) - require.Equal(t, newValue, db.Get(key)) - - db.Delete(key) - require.Nil(t, db.Get(key)) - - cacheWrapper := db.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} - -func TestCommit(t *testing.T) { - db := mem.NewStore() - key, value := []byte("key"), []byte("value") - - db.Set(key, value) - id := db.Commit() - require.True(t, id.IsZero()) - require.True(t, db.LastCommitID().IsZero()) - require.Equal(t, value, db.Get(key)) -} - -func TestStorePrunningOptions(t *testing.T) { - // this is a no-op - db := mem.NewStore() - require.Equal(t, pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined), db.GetPruning()) -} diff --git a/store/mem/store.go b/store/mem/store.go deleted file mode 100644 index b819d75363..0000000000 --- a/store/mem/store.go +++ /dev/null @@ -1,62 +0,0 @@ -package mem - -import ( - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -var ( - _ types.KVStore = (*Store)(nil) - _ types.Committer = (*Store)(nil) -) - -// Store implements an in-memory only KVStore. Entries are persisted between -// commits and thus between blocks. State in Memory store is not committed as part of app state but maintained privately by each node -type Store struct { - dbadapter.Store -} - -func NewStore() *Store { - return NewStoreWithDB(dbm.NewMemDB()) -} - -func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer // Concrete return type is fine here. - return &Store{Store: dbadapter.Store{DB: db}} -} - -// GetStoreType returns the Store's type. -func (s Store) GetStoreType() types.StoreType { - return types.StoreTypeMemory -} - -// CacheWrap branches the underlying store. -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) -} - -// CacheWrapWithTrace implements KVStore. -func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) -} - -// Commit performs a no-op as entries are persistent between commitments. -func (s *Store) Commit() (id types.CommitID) { return } - -func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. 
-// They must be set on the root commit multi-store. -func (s *Store) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} - -func (s Store) LastCommitID() (id types.CommitID) { return } - -func (s Store) WorkingHash() (hash []byte) { return } diff --git a/store/memkv/README.md b/store/memkv/README.md new file mode 100644 index 0000000000..52c0734649 --- /dev/null +++ b/store/memkv/README.md @@ -0,0 +1,8 @@ +# memkv + +The `memkv.Store` implementation defines an in-memory `KVStore`, which is internally +backed by a thread-safe BTree. The `memkv.Store` does not provide any branching +functionality and should be used as an ephemeral store, typically reset between +blocks. A `memkv.Store` contains no reference to a parent store, but can be used +as a parent store for other stores. The `memkv.Store` can be useful for testing +purposes and where state persistence is not required or should be ephemeral. diff --git a/store/memkv/iterator.go b/store/memkv/iterator.go new file mode 100644 index 0000000000..81d58de3de --- /dev/null +++ b/store/memkv/iterator.go @@ -0,0 +1,120 @@ +package memkv + +import ( + "bytes" + + "github.com/tidwall/btree" + "golang.org/x/exp/slices" + + "cosmossdk.io/store/v2" +) + +var _ store.Iterator = (*iterator)(nil) + +type iterator struct { + treeItr btree.IterG[store.KVPair] + start []byte + end []byte + reverse bool + valid bool +} + +func newIterator(tree *btree.BTreeG[store.KVPair], start, end []byte, reverse bool) store.Iterator { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + panic(store.ErrKeyEmpty) + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + panic(store.ErrStartAfterEnd) + } + + iter := tree.Iter() + + var valid bool + if reverse { + if end != nil { + valid = iter.Seek(store.KVPair{Key: end, Value: nil}) + if !valid { + valid = iter.Last() + } else { + valid = iter.Prev() // end is exclusive + } + } else { + valid = iter.Last() + } + } else { + if start != nil { + valid = iter.Seek(store.KVPair{Key: start, Value: nil}) + } else { + valid = iter.First() + } + } + + itr := &iterator{ + treeItr: iter, + start: start, + end: end, + reverse: reverse, + valid: valid, + } + + if itr.valid { + itr.valid = itr.keyInRange(itr.Key()) + } + + return itr +} + +// Domain returns the domain of the iterator. The caller must not modify the +// return values. 
+func (itr *iterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr *iterator) Valid() bool { + return itr.valid +} + +func (itr *iterator) Key() []byte { + return slices.Clone(itr.treeItr.Item().Key) +} + +func (itr *iterator) Value() []byte { + return slices.Clone(itr.treeItr.Item().Value) +} + +func (itr *iterator) Next() bool { + if !itr.valid { + return false + } + + if !itr.reverse { + itr.valid = itr.treeItr.Next() + } else { + itr.valid = itr.treeItr.Prev() + } + + if itr.valid { + itr.valid = itr.keyInRange(itr.Key()) + } + + return itr.valid +} + +func (itr *iterator) Close() { + itr.treeItr.Release() +} + +func (itr *iterator) Error() error { + return nil +} + +func (itr *iterator) keyInRange(key []byte) bool { + if !itr.reverse && itr.end != nil && bytes.Compare(key, itr.end) >= 0 { + return false + } + if itr.reverse && itr.start != nil && bytes.Compare(key, itr.start) < 0 { + return false + } + return true +} diff --git a/store/memkv/store.go b/store/memkv/store.go new file mode 100644 index 0000000000..b0914f7454 --- /dev/null +++ b/store/memkv/store.go @@ -0,0 +1,104 @@ +package memkv + +import ( + "bytes" + + "github.com/tidwall/btree" + + "cosmossdk.io/store/v2" +) + +const ( + // degree defines the approximate number of items and children per B-tree node. + degree = 32 +) + +var _ store.KVStore = (*Store)(nil) + +// Store defines an in-memory KVStore backed by a BTree for storage, indexing, +// and iteration. Note, the store is ephemeral and does not support commitment. +// If using the store between blocks or commitments, the caller must ensure to +// either create a new store or call Reset() on the existing store. +type Store struct { + storeKey string + tree *btree.BTreeG[store.KVPair] +} + +func New(storeKey string) store.KVStore { + return &Store{ + storeKey: storeKey, + tree: btree.NewBTreeGOptions( + func(a, b store.KVPair) bool { return bytes.Compare(a.Key, b.Key) <= -1 }, + btree.Options{ + Degree: degree, + NoLocks: false, + }), + } +} + +func (s *Store) GetStoreKey() string { + return s.storeKey +} + +func (s *Store) GetStoreType() store.StoreType { + return store.StoreTypeMem +} + +func (s *Store) Get(key []byte) []byte { + store.AssertValidKey(key) + + kvPair, ok := s.tree.Get(store.KVPair{Key: key, StoreKey: s.storeKey}) + if !ok || kvPair.Value == nil { + return nil + } + + return kvPair.Value +} + +func (s *Store) Has(key []byte) bool { + store.AssertValidKey(key) + + return s.Get(key) != nil +} + +func (s *Store) Set(key, value []byte) { + store.AssertValidKey(key) + store.AssertValidValue(value) + + s.tree.Set(store.KVPair{Key: key, Value: value, StoreKey: s.storeKey}) +} + +func (s *Store) Delete(key []byte) { + store.AssertValidKey(key) + + s.tree.Set(store.KVPair{Key: key, StoreKey: s.storeKey, Value: nil}) +} + +func (s *Store) GetChangeset() *store.Changeset { + itr := s.Iterator(nil, nil) + defer itr.Close() + + var kvPairs []store.KVPair + for ; itr.Valid(); itr.Next() { + kvPairs = append(kvPairs, store.KVPair{ + StoreKey: s.storeKey, + Key: itr.Key(), + Value: itr.Value(), + }) + } + + return store.NewChangeset(kvPairs...) 
+} + +func (s *Store) Reset() error { + s.tree.Clear() + return nil +} + +func (s *Store) Iterator(start, end []byte) store.Iterator { + return newIterator(s.tree, start, end, false) +} + +func (s *Store) ReverseIterator(start, end []byte) store.Iterator { + return newIterator(s.tree, start, end, true) +} diff --git a/store/memkv/store_test.go b/store/memkv/store_test.go new file mode 100644 index 0000000000..d20a22f0db --- /dev/null +++ b/store/memkv/store_test.go @@ -0,0 +1,257 @@ +package memkv_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/memkv" +) + +const storeKey = "storeKey" + +type StoreTestSuite struct { + suite.Suite + + kvStore store.KVStore +} + +func TestStorageTestSuite(t *testing.T) { + suite.Run(t, &StoreTestSuite{}) +} + +func (s *StoreTestSuite) SetupTest() { + s.kvStore = memkv.New(storeKey) +} + +func (s *StoreTestSuite) TestGetStoreType() { + s.Require().Equal(store.StoreTypeMem, s.kvStore.GetStoreType()) +} + +func (s *StoreTestSuite) TestGetChangeset() { + // initial store with no writes should have an empty changeset + cs := s.kvStore.GetChangeset() + s.Require().Zero(cs.Size()) + + // perform some writes + s.kvStore.Set([]byte("key000"), []byte("updated_val000")) + s.kvStore.Delete([]byte("key001")) + + cs = s.kvStore.GetChangeset() + s.Require().Equal(cs.Size(), 2) +} + +func (s *StoreTestSuite) TestReset() { + s.Require().NoError(s.kvStore.Reset()) + + cs := s.kvStore.GetChangeset() + s.Require().Zero(cs.Size()) +} + +func (s *StoreTestSuite) TestCRUD() { + bz := s.kvStore.Get([]byte("key000")) + s.Require().Nil(bz) + s.Require().False(s.kvStore.Has([]byte("key000"))) + + s.kvStore.Set([]byte("key000"), []byte("val000")) + + bz = s.kvStore.Get([]byte("key000")) + s.Require().Equal([]byte("val000"), bz) + s.Require().True(s.kvStore.Has([]byte("key000"))) + + s.kvStore.Delete([]byte("key000")) + + bz = s.kvStore.Get([]byte("key000")) + s.Require().Nil(bz) + s.Require().False(s.kvStore.Has([]byte("key000"))) +} + +func (s *StoreTestSuite) TestIterator() { + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + s.kvStore.Set([]byte(key), []byte(val)) + } + + // iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.Iterator([]byte("key000"), nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.Iterator(nil, []byte("key100")) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with a start and end domain + 
s.Run("start_and_end", func() { + itr := s.kvStore.Iterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(50, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.Iterator(nil, nil) + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} + +func (s *StoreTestSuite) TestReverseIterator() { + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + s.kvStore.Set([]byte(key), []byte(val)) + } + + // reverse iterator without an end domain + s.Run("start_only", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), nil) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // reverse iterator without a start domain + s.Run("end_only", func() { + itr := s.kvStore.ReverseIterator(nil, []byte("key100")) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Valid()) + s.Require().False(itr.Next()) + }) + + // reverse iterator with with a start and end domain + s.Run("start_and_end", func() { + itr := s.kvStore.ReverseIterator([]byte("key000"), []byte("key050")) + defer itr.Close() + + i := 49 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(50, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) + + // reverse iterator with an open domain + s.Run("open_domain", func() { + itr := s.kvStore.ReverseIterator(nil, nil) + defer itr.Close() + + i := 99 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), 
string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i-- + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + }) +} diff --git a/store/metrics/telemetry.go b/store/metrics/telemetry.go deleted file mode 100644 index d5bc55c45b..0000000000 --- a/store/metrics/telemetry.go +++ /dev/null @@ -1,56 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/hashicorp/go-metrics" -) - -// StoreMetrics defines the set of metrics for the store package -type StoreMetrics interface { - MeasureSince(keys ...string) -} - -var ( - _ StoreMetrics = Metrics{} - _ StoreMetrics = NoOpMetrics{} -) - -// Metrics defines the metrics wrapper for the store package -type Metrics struct { - Labels []metrics.Label -} - -// NewMetrics returns a new instance of the Metrics with labels set by the node operator -func NewMetrics(labels [][]string) Metrics { - gatherer := Metrics{} - - if numGlobalLables := len(labels); numGlobalLables > 0 { - parsedGlobalLabels := make([]metrics.Label, numGlobalLables) - for i, gl := range labels { - parsedGlobalLabels[i] = metrics.Label{Name: gl[0], Value: gl[1]} - } - - gatherer.Labels = parsedGlobalLabels - } - - return gatherer -} - -// MeasureSince provides a wrapper functionality for emitting a time measure -// metric with global labels (if any). -func (m Metrics) MeasureSince(keys ...string) { - start := time.Now() - metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels) -} - -// NoOpMetrics is a no-op implementation of the StoreMetrics interface -type NoOpMetrics struct{} - -// NewNoOpMetrics returns a new instance of the NoOpMetrics -func NewNoOpMetrics() NoOpMetrics { - return NoOpMetrics{} -} - -// MeasureSince is a no-op implementation of the StoreMetrics interface to avoid time.Now() calls -func (m NoOpMetrics) MeasureSince(keys ...string) {} diff --git a/store/mock/cosmos_cosmos_db_DB.go b/store/mock/cosmos_cosmos_db_DB.go deleted file mode 100644 index 4a79ee7956..0000000000 --- a/store/mock/cosmos_cosmos_db_DB.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/cosmos/cosmos-db (interfaces: DB) - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - db "github.com/cosmos/cosmos-db" - gomock "github.com/golang/mock/gomock" -) - -// MockDB is a mock of DB interface. -type MockDB struct { - ctrl *gomock.Controller - recorder *MockDBMockRecorder -} - -// MockDBMockRecorder is the mock recorder for MockDB. -type MockDBMockRecorder struct { - mock *MockDB -} - -// NewMockDB creates a new mock instance. -func NewMockDB(ctrl *gomock.Controller) *MockDB { - mock := &MockDB{ctrl: ctrl} - mock.recorder = &MockDBMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDB) EXPECT() *MockDBMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockDB) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockDBMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close)) -} - -// Delete mocks base method. 
-func (m *MockDB) Delete(arg0 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0) -} - -// DeleteSync mocks base method. -func (m *MockDB) DeleteSync(arg0 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteSync", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteSync indicates an expected call of DeleteSync. -func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0) -} - -// Get mocks base method. -func (m *MockDB) Get(arg0 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0) -} - -// Has mocks base method. -func (m *MockDB) Has(arg0 []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockDBMockRecorder) Has(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), arg0) -} - -// Iterator mocks base method. -func (m *MockDB) Iterator(arg0, arg1 []byte) (db.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iterator", arg0, arg1) - ret0, _ := ret[0].(db.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Iterator indicates an expected call of Iterator. -func (mr *MockDBMockRecorder) Iterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), arg0, arg1) -} - -// NewBatch mocks base method. -func (m *MockDB) NewBatch() db.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatch") - ret0, _ := ret[0].(db.Batch) - return ret0 -} - -// NewBatch indicates an expected call of NewBatch. -func (mr *MockDBMockRecorder) NewBatch() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch)) -} - -// NewBatchWithSize mocks base method. -func (m *MockDB) NewBatchWithSize(arg0 int) db.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatchWithSize", arg0) - ret0, _ := ret[0].(db.Batch) - return ret0 -} - -// NewBatchWithSize indicates an expected call of NewBatchWithSize. -func (mr *MockDBMockRecorder) NewBatchWithSize(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatchWithSize", reflect.TypeOf((*MockDB)(nil).NewBatchWithSize), arg0) -} - -// Print mocks base method. -func (m *MockDB) Print() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Print") - ret0, _ := ret[0].(error) - return ret0 -} - -// Print indicates an expected call of Print. 
-func (mr *MockDBMockRecorder) Print() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print)) -} - -// ReverseIterator mocks base method. -func (m *MockDB) ReverseIterator(arg0, arg1 []byte) (db.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReverseIterator", arg0, arg1) - ret0, _ := ret[0].(db.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReverseIterator indicates an expected call of ReverseIterator. -func (mr *MockDBMockRecorder) ReverseIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), arg0, arg1) -} - -// Set mocks base method. -func (m *MockDB) Set(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Set", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Set indicates an expected call of Set. -func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1) -} - -// SetSync mocks base method. -func (m *MockDB) SetSync(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetSync", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetSync indicates an expected call of SetSync. -func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1) -} - -// Stats mocks base method. -func (m *MockDB) Stats() map[string]string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stats") - ret0, _ := ret[0].(map[string]string) - return ret0 -} - -// Stats indicates an expected call of Stats. -func (mr *MockDBMockRecorder) Stats() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats)) -} diff --git a/store/prefix/store.go b/store/prefix/store.go deleted file mode 100644 index 32b9e8247e..0000000000 --- a/store/prefix/store.go +++ /dev/null @@ -1,207 +0,0 @@ -package prefix - -import ( - "bytes" - "errors" - "io" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -var _ types.KVStore = Store{} - -// Store is similar with cometbft/cometbft/libs/db/prefix_db -// both gives access only to the limited subset of the store -// for convinience or safety -type Store struct { - parent types.KVStore - prefix []byte -} - -func NewStore(parent types.KVStore, prefix []byte) Store { - return Store{ - parent: parent, - prefix: prefix, - } -} - -func cloneAppend(bz, tail []byte) (res []byte) { - res = make([]byte, len(bz)+len(tail)) - copy(res, bz) - copy(res[len(bz):], tail) - return -} - -func (s Store) key(key []byte) (res []byte) { - if key == nil { - panic("nil key on Store") - } - res = cloneAppend(s.prefix, key) - return -} - -// Implements Store -func (s Store) GetStoreType() types.StoreType { - return s.parent.GetStoreType() -} - -// Implements CacheWrap -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) -} - -// CacheWrapWithTrace implements the KVStore interface. 
-func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) -} - -// Implements KVStore -func (s Store) Get(key []byte) []byte { - res := s.parent.Get(s.key(key)) - return res -} - -// Implements KVStore -func (s Store) Has(key []byte) bool { - return s.parent.Has(s.key(key)) -} - -// Implements KVStore -func (s Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - s.parent.Set(s.key(key), value) -} - -// Implements KVStore -func (s Store) Delete(key []byte) { - s.parent.Delete(s.key(key)) -} - -// Implements KVStore -// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106 -func (s Store) Iterator(start, end []byte) types.Iterator { - newstart := cloneAppend(s.prefix, start) - - var newend []byte - if end == nil { - newend = cpIncr(s.prefix) - } else { - newend = cloneAppend(s.prefix, end) - } - - iter := s.parent.Iterator(newstart, newend) - - return newPrefixIterator(s.prefix, start, end, iter) -} - -// ReverseIterator implements KVStore -// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129 -func (s Store) ReverseIterator(start, end []byte) types.Iterator { - newstart := cloneAppend(s.prefix, start) - - var newend []byte - if end == nil { - newend = cpIncr(s.prefix) - } else { - newend = cloneAppend(s.prefix, end) - } - - iter := s.parent.ReverseIterator(newstart, newend) - - return newPrefixIterator(s.prefix, start, end, iter) -} - -var _ types.Iterator = (*prefixIterator)(nil) - -type prefixIterator struct { - prefix []byte - start []byte - end []byte - iter types.Iterator - valid bool -} - -func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator { - return &prefixIterator{ - prefix: prefix, - start: start, - end: end, - iter: parent, - valid: parent.Valid() && bytes.HasPrefix(parent.Key(), prefix), - } -} - -// Implements Iterator -func (pi *prefixIterator) Domain() ([]byte, []byte) { - return pi.start, pi.end -} - -// Implements Iterator -func (pi *prefixIterator) Valid() bool { - return pi.valid && pi.iter.Valid() -} - -// Implements Iterator -func (pi *prefixIterator) Next() { - if !pi.valid { - panic("prefixIterator invalid, cannot call Next()") - } - - if pi.iter.Next(); !pi.iter.Valid() || !bytes.HasPrefix(pi.iter.Key(), pi.prefix) { - // TODO: shouldn't pi be set to nil instead? - pi.valid = false - } -} - -// Implements Iterator -func (pi *prefixIterator) Key() (key []byte) { - if !pi.valid { - panic("prefixIterator invalid, cannot call Key()") - } - - key = pi.iter.Key() - key = stripPrefix(key, pi.prefix) - - return -} - -// Implements Iterator -func (pi *prefixIterator) Value() []byte { - if !pi.valid { - panic("prefixIterator invalid, cannot call Value()") - } - - return pi.iter.Value() -} - -// Implements Iterator -func (pi *prefixIterator) Close() error { - return pi.iter.Close() -} - -// Error returns an error if the prefixIterator is invalid defined by the Valid -// method. 
-func (pi *prefixIterator) Error() error { - if !pi.Valid() { - return errors.New("invalid prefixIterator") - } - - return nil -} - -// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go -func stripPrefix(key, prefix []byte) []byte { - if len(key) < len(prefix) || !bytes.Equal(key[:len(prefix)], prefix) { - panic("should not happen") - } - - return key[len(prefix):] -} - -// wrapping types.PrefixEndBytes -func cpIncr(bz []byte) []byte { - return types.PrefixEndBytes(bz) -} diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go deleted file mode 100644 index 9121e1f554..0000000000 --- a/store/prefix/store_test.go +++ /dev/null @@ -1,450 +0,0 @@ -package prefix - -import ( - "crypto/rand" - "testing" - - dbm "github.com/cosmos/cosmos-db" - tiavl "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/gaskv" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/types" -) - -// copied from iavl/store_test.go -var ( - cacheSize = 100 -) - -func bz(s string) []byte { return []byte(s) } - -type kvpair struct { - key []byte - value []byte -} - -func genRandomKVPairs(t *testing.T) []kvpair { - t.Helper() - kvps := make([]kvpair, 20) - - for i := 0; i < 20; i++ { - kvps[i].key = make([]byte, 32) - _, err := rand.Read(kvps[i].key) - require.NoError(t, err) - kvps[i].value = make([]byte, 32) - _, err = rand.Read(kvps[i].value) - require.NoError(t, err) - } - - return kvps -} - -func setRandomKVPairs(t *testing.T, store types.KVStore) []kvpair { - t.Helper() - kvps := genRandomKVPairs(t) - for _, kvp := range kvps { - store.Set(kvp.key, kvp.value) - } - return kvps -} - -func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) { - t.Helper() - prefixStore := NewStore(baseStore, prefix) - prefixPrefixStore := NewStore(prefixStore, []byte("prefix")) - - require.Panics(t, func() { prefixStore.Get(nil) }) - require.Panics(t, func() { prefixStore.Set(nil, []byte{}) }) - - kvps := setRandomKVPairs(t, prefixPrefixStore) - - for i := 0; i < 20; i++ { - key := kvps[i].key - value := kvps[i].value - require.True(t, prefixPrefixStore.Has(key)) - require.Equal(t, value, prefixPrefixStore.Get(key)) - - key = append([]byte("prefix"), key...) - require.True(t, prefixStore.Has(key)) - require.Equal(t, value, prefixStore.Get(key)) - key = append(prefix, key...) - require.True(t, baseStore.Has(key)) - require.Equal(t, value, baseStore.Get(key)) - - key = kvps[i].key - prefixPrefixStore.Delete(key) - require.False(t, prefixPrefixStore.Has(key)) - require.Nil(t, prefixPrefixStore.Get(key)) - key = append([]byte("prefix"), key...) - require.False(t, prefixStore.Has(key)) - require.Nil(t, prefixStore.Get(key)) - key = append(prefix, key...) 
- require.False(t, baseStore.Has(key)) - require.Nil(t, baseStore.Get(key)) - } -} - -func TestIAVLStorePrefix(t *testing.T) { - db := dbm.NewMemDB() - tree := tiavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - iavlStore := iavl.UnsafeNewStore(tree) - - testPrefixStore(t, iavlStore, []byte("test")) -} - -func TestPrefixKVStoreNoNilSet(t *testing.T) { - meter := types.NewGasMeter(100000000) - mem := dbadapter.Store{DB: dbm.NewMemDB()} - gasStore := gaskv.NewStore(mem, meter, types.KVGasConfig()) - require.Panics(t, func() { gasStore.Set([]byte("key"), nil) }, "setting a nil value should panic") -} - -func TestPrefixStoreIterate(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - prefix := []byte("test") - prefixStore := NewStore(baseStore, prefix) - - setRandomKVPairs(t, prefixStore) - - bIter := types.KVStorePrefixIterator(baseStore, prefix) - pIter := types.KVStorePrefixIterator(prefixStore, nil) - - for bIter.Valid() && pIter.Valid() { - require.Equal(t, bIter.Key(), append(prefix, pIter.Key()...)) - require.Equal(t, bIter.Value(), pIter.Value()) - - bIter.Next() - pIter.Next() - } - - bIter.Close() - pIter.Close() -} - -func incFirstByte(bz []byte) { - bz[0]++ -} - -func TestCloneAppend(t *testing.T) { - kvps := genRandomKVPairs(t) - for _, kvp := range kvps { - bz := cloneAppend(kvp.key, kvp.value) - require.Equal(t, bz, append(kvp.key, kvp.value...)) - - incFirstByte(bz) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - - bz = cloneAppend(kvp.key, kvp.value) - incFirstByte(kvp.key) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - - bz = cloneAppend(kvp.key, kvp.value) - incFirstByte(kvp.value) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - } -} - -func TestPrefixStoreIteratorEdgeCase(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - - // overflow in cpIncr - prefix := []byte{0xAA, 0xFF, 0xFF} - prefixStore := NewStore(baseStore, prefix) - - // ascending order - baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) - - iter := prefixStore.Iterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() -} - -func TestPrefixStoreReverseIteratorEdgeCase(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - - // overflow in cpIncr - prefix := []byte{0xAA, 0xFF, 0xFF} - prefixStore := NewStore(baseStore, prefix) - - // descending order - baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) - - iter := prefixStore.ReverseIterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() - - db = dbm.NewMemDB() - baseStore = 
dbadapter.Store{DB: db} - - // underflow in cpDecr - prefix = []byte{0xAA, 0x00, 0x00} - prefixStore = NewStore(baseStore, prefix) - - baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x01}, []byte{}) - baseStore.Set([]byte{0xAA, 0x00, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xA9, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xA9, 0xFF, 0xFF}, []byte{}) - - iter = prefixStore.ReverseIterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() -} - -// Tests below are ported from https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db_test.go - -func mockStoreWithStuff() types.KVStore { - db := dbm.NewMemDB() - store := dbadapter.Store{DB: db} - // Under "key" prefix - store.Set(bz("key"), bz("value")) - store.Set(bz("key1"), bz("value1")) - store.Set(bz("key2"), bz("value2")) - store.Set(bz("key3"), bz("value3")) - store.Set(bz("something"), bz("else")) - store.Set(bz("k"), bz("val")) - store.Set(bz("ke"), bz("valu")) - store.Set(bz("kee"), bz("valuu")) - return store -} - -func checkValue(t *testing.T, store types.KVStore, key, expected []byte) { - t.Helper() - bz := store.Get(key) - require.Equal(t, expected, bz) -} - -func checkValid(t *testing.T, itr types.Iterator, expected bool) { - t.Helper() - valid := itr.Valid() - require.Equal(t, expected, valid) -} - -func checkNext(t *testing.T, itr types.Iterator, expected bool) { - t.Helper() - itr.Next() - valid := itr.Valid() - require.Equal(t, expected, valid) -} - -func checkDomain(t *testing.T, itr types.Iterator, start, end []byte) { - t.Helper() - ds, de := itr.Domain() - require.Equal(t, start, ds) - require.Equal(t, end, de) -} - -func checkItem(t *testing.T, itr types.Iterator, key, value []byte) { - t.Helper() - require.Exactly(t, key, itr.Key()) - require.Exactly(t, value, itr.Value()) -} - -func checkInvalid(t *testing.T, itr types.Iterator) { - t.Helper() - checkValid(t, itr, false) - checkKeyPanics(t, itr) - checkValuePanics(t, itr) - checkNextPanics(t, itr) -} - -func checkKeyPanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Key() }) -} - -func checkValuePanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Value() }) -} - -func checkNextPanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Next() }) -} - -func TestPrefixDBSimple(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - checkValue(t, pstore, bz("key"), nil) - checkValue(t, pstore, bz(""), bz("value")) - checkValue(t, pstore, bz("key1"), nil) - checkValue(t, pstore, bz("1"), bz("value1")) - checkValue(t, pstore, bz("key2"), nil) - checkValue(t, pstore, bz("2"), bz("value2")) - checkValue(t, pstore, bz("key3"), nil) - checkValue(t, pstore, bz("3"), bz("value3")) - checkValue(t, pstore, bz("something"), nil) - checkValue(t, pstore, bz("k"), nil) - checkValue(t, pstore, bz("ke"), nil) - checkValue(t, pstore, bz("kee"), nil) -} - -func TestPrefixDBIterator1(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(nil, nil) - checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, true) 
- checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator2(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator3(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator4(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(bz(""), bz("")) - checkDomain(t, itr, bz(""), bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator1(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(nil, nil) - checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator2(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator3(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator4(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(bz(""), bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - store := dbadapter.Store{DB: db} - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} diff --git a/store/pruning/README.md b/store/pruning/README.md deleted file mode 100644 index 2548807e2a..0000000000 --- a/store/pruning/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Pruning - -## Overview - -Pruning is the mechanism for deleting old application heights from the disk. Depending on the use case, -nodes may require different pruning strategies. For example, archive nodes must keep all -the states and prune nothing. 
On the other hand, a regular validator node may want to only keep 100 latest heights for performance reasons. - -## Strategies - -The strategies are configured in `app.toml`, with the format `pruning = ""` where the options are: - -* `default`: only the last 362,880 states(approximately 3.5 weeks worth of state) are kept; pruning at 10 block intervals -* `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node) -* `everything`: 2 latest states will be kept; pruning at 10 block intervals. -* `custom`: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' - -If no strategy is given to the BaseApp, `nothing` is selected. However, we perform validation on the CLI layer to require these to be always set in the config file. - -## Custom Pruning - -These are applied if and only if the pruning strategy is custom: - -* `pruning-keep-recent`: N means to keep all of the last N states -* `pruning-interval`: N means to delete old states from disk every Nth block. - -## Relationship to State Sync Snapshots - -Snapshot settings are optional. However, if set, they have an effect on how pruning is done by -persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details. diff --git a/store/pruning/export_test.go b/store/pruning/export_test.go deleted file mode 100644 index 676ff132ff..0000000000 --- a/store/pruning/export_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package pruning - -var ( - PruneSnapshotHeightsKey = pruneSnapshotHeightsKey - - Int64SliceToBytes = int64SliceToBytes - LoadPruningSnapshotHeights = loadPruningSnapshotHeights -) diff --git a/store/pruning/manager.go b/store/pruning/manager.go deleted file mode 100644 index 9a99e49151..0000000000 --- a/store/pruning/manager.go +++ /dev/null @@ -1,191 +0,0 @@ -package pruning - -import ( - "encoding/binary" - "fmt" - "sort" - "sync" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/log" - "cosmossdk.io/store/pruning/types" -) - -// Manager is an abstraction to handle the logic needed for -// determining when to prune old heights of the store -// based on the strategy described by the pruning options. -type Manager struct { - db dbm.DB - logger log.Logger - opts types.PruningOptions - snapshotInterval uint64 - // Snapshots are taken in a separate goroutine from the regular execution - // and can be delivered asynchrounously via HandleSnapshotHeight. - // Therefore, we sync access to pruneSnapshotHeights with this mutex. - pruneSnapshotHeightsMx sync.RWMutex - // These are the heights that are multiples of snapshotInterval and kept for state sync snapshots. - // The heights are added to be pruned when a snapshot is complete. - pruneSnapshotHeights []int64 -} - -// NegativeHeightsError is returned when a negative height is provided to the manager. -type NegativeHeightsError struct { - Height int64 -} - -var _ error = &NegativeHeightsError{} - -func (e *NegativeHeightsError) Error() string { - return fmt.Sprintf("failed to get pruned heights: %d", e.Height) -} - -var pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights") - -// NewManager returns a new Manager with the given db and logger. -// The retuned manager uses a pruning strategy of "nothing" which -// keeps all heights. Users of the Manager may change the strategy -// by calling SetOptions. 
-func NewManager(db dbm.DB, logger log.Logger) *Manager { - return &Manager{ - db: db, - logger: logger, - opts: types.NewPruningOptions(types.PruningNothing), - pruneSnapshotHeights: []int64{0}, - } -} - -// SetOptions sets the pruning strategy on the manager. -func (m *Manager) SetOptions(opts types.PruningOptions) { - m.opts = opts -} - -// GetOptions fetches the pruning strategy from the manager. -func (m *Manager) GetOptions() types.PruningOptions { - return m.opts -} - -// HandleSnapshotHeight persists the snapshot height to be pruned at the next appropriate -// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails. -// The input height must be greater than 0, and the pruning strategy must not be set to pruning nothing. -// If either of these conditions is not met, this function does nothing. -func (m *Manager) HandleSnapshotHeight(height int64) { - if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 { - return - } - - m.pruneSnapshotHeightsMx.Lock() - defer m.pruneSnapshotHeightsMx.Unlock() - - m.logger.Debug("HandleSnapshotHeight", "height", height) - m.pruneSnapshotHeights = append(m.pruneSnapshotHeights, height) - sort.Slice(m.pruneSnapshotHeights, func(i, j int) bool { return m.pruneSnapshotHeights[i] < m.pruneSnapshotHeights[j] }) - k := 1 - for ; k < len(m.pruneSnapshotHeights); k++ { - if m.pruneSnapshotHeights[k] != m.pruneSnapshotHeights[k-1]+int64(m.snapshotInterval) { - break - } - } - m.pruneSnapshotHeights = m.pruneSnapshotHeights[k-1:] - - // flush the updates to disk so that they are not lost if crash happens. - if err := m.db.SetSync(pruneSnapshotHeightsKey, int64SliceToBytes(m.pruneSnapshotHeights)); err != nil { - panic(err) - } -} - -// SetSnapshotInterval sets the interval at which the snapshots are taken. -func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) { - m.snapshotInterval = snapshotInterval -} - -// GetPruningHeight returns the height which can prune upto if it is able to prune at the given height. -func (m *Manager) GetPruningHeight(height int64) int64 { - if m.opts.GetPruningStrategy() == types.PruningNothing { - return 0 - } - if m.opts.Interval <= 0 { - return 0 - } - - if height%int64(m.opts.Interval) != 0 || height <= int64(m.opts.KeepRecent) { - return 0 - } - - // Consider the snapshot height - pruneHeight := height - 1 - int64(m.opts.KeepRecent) // we should keep the current height at least - - m.pruneSnapshotHeightsMx.RLock() - defer m.pruneSnapshotHeightsMx.RUnlock() - - // snapshotInterval is zero, indicating that all heights can be pruned - if m.snapshotInterval <= 0 { - return pruneHeight - } - - if len(m.pruneSnapshotHeights) == 0 { // the length should be greater than zero - return 0 - } - - // the snapshot `m.pruneSnapshotHeights[0]` is already operated, - // so we can prune upto `m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1` - snHeight := m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1 - if snHeight < pruneHeight { - return snHeight - } - return pruneHeight -} - -// LoadSnapshotHeights loads the snapshot heights from the database as a crash recovery. 
-func (m *Manager) LoadSnapshotHeights(db dbm.DB) error { - if m.opts.GetPruningStrategy() == types.PruningNothing { - return nil - } - - loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(db) - if err != nil { - return err - } - - if len(loadedPruneSnapshotHeights) > 0 { - m.pruneSnapshotHeightsMx.Lock() - defer m.pruneSnapshotHeightsMx.Unlock() - m.pruneSnapshotHeights = loadedPruneSnapshotHeights - } - - return nil -} - -func loadPruningSnapshotHeights(db dbm.DB) ([]int64, error) { - bz, err := db.Get(pruneSnapshotHeightsKey) - if err != nil { - return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err) - } - if len(bz) == 0 { - return []int64{}, nil - } - - pruneSnapshotHeights := make([]int64, len(bz)/8) - i, offset := 0, 0 - for offset < len(bz) { - h := int64(binary.BigEndian.Uint64(bz[offset : offset+8])) - if h < 0 { - return nil, &NegativeHeightsError{Height: h} - } - pruneSnapshotHeights[i] = h - i++ - offset += 8 - } - - return pruneSnapshotHeights, nil -} - -func int64SliceToBytes(slice []int64) []byte { - bz := make([]byte, 0, len(slice)*8) - for _, ph := range slice { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(ph)) - bz = append(bz, buf...) - } - return bz -} diff --git a/store/pruning/manager_test.go b/store/pruning/manager_test.go deleted file mode 100644 index 006891de85..0000000000 --- a/store/pruning/manager_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package pruning_test - -import ( - "errors" - "fmt" - "testing" - - db "github.com/cosmos/cosmos-db" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/mock" - "cosmossdk.io/store/pruning" - "cosmossdk.io/store/pruning/types" -) - -const dbErr = "db error" - -func TestNewManager(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy()) -} - -func TestStrategies(t *testing.T) { - testcases := map[string]struct { - strategy types.PruningOptions - snapshotInterval uint64 - strategyToAssert types.PruningStrategy - isValid bool - }{ - "prune nothing - no snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), - strategyToAssert: types.PruningNothing, - }, - "prune nothing - snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), - strategyToAssert: types.PruningNothing, - snapshotInterval: 100, - }, - "prune default - no snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), - strategyToAssert: types.PruningDefault, - }, - "prune default - snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), - strategyToAssert: types.PruningDefault, - snapshotInterval: 100, - }, - "prune everything - no snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), - strategyToAssert: types.PruningEverything, - }, - "prune everything - snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), - strategyToAssert: types.PruningEverything, - snapshotInterval: 100, - }, - "custom 100-10-15": { - strategy: types.NewCustomPruningOptions(100, 15), - snapshotInterval: 10, - strategyToAssert: types.PruningCustom, - }, - "custom 10-10-15": { - strategy: types.NewCustomPruningOptions(10, 15), - snapshotInterval: 10, - strategyToAssert: types.PruningCustom, - }, - "custom 100-0-15": { - strategy: types.NewCustomPruningOptions(100, 15), - snapshotInterval: 0, - strategyToAssert: types.PruningCustom, - }, - } - 
- for name, tc := range testcases { - tc := tc // Local copy to avoid shadowing. - t.Run(name, func(t *testing.T) { - t.Parallel() - - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - - curStrategy := tc.strategy - manager.SetSnapshotInterval(tc.snapshotInterval) - - pruneStrategy := curStrategy.GetPruningStrategy() - require.Equal(t, tc.strategyToAssert, pruneStrategy) - - // Validate strategy parameters - switch pruneStrategy { - case types.PruningDefault: - require.Equal(t, uint64(362880), curStrategy.KeepRecent) - require.Equal(t, uint64(10), curStrategy.Interval) - case types.PruningNothing: - require.Equal(t, uint64(0), curStrategy.KeepRecent) - require.Equal(t, uint64(0), curStrategy.Interval) - case types.PruningEverything: - require.Equal(t, uint64(2), curStrategy.KeepRecent) - require.Equal(t, uint64(10), curStrategy.Interval) - default: - // - } - - manager.SetOptions(curStrategy) - require.Equal(t, tc.strategy, manager.GetOptions()) - - curKeepRecent := curStrategy.KeepRecent - snHeight := int64(tc.snapshotInterval - 1) - for curHeight := int64(0); curHeight < 110000; curHeight++ { - if tc.snapshotInterval != 0 { - if curHeight > int64(tc.snapshotInterval) && curHeight%int64(tc.snapshotInterval) == int64(tc.snapshotInterval)-1 { - manager.HandleSnapshotHeight(curHeight - int64(tc.snapshotInterval) + 1) - snHeight = curHeight - } - } - - pruningHeightActual := manager.GetPruningHeight(curHeight) - curHeightStr := fmt.Sprintf("height: %d", curHeight) - - switch curStrategy.GetPruningStrategy() { - case types.PruningNothing: - require.Equal(t, int64(0), pruningHeightActual, curHeightStr) - default: - if curHeight > int64(curKeepRecent) && curHeight%int64(curStrategy.Interval) == 0 { - pruningHeightExpected := curHeight - int64(curKeepRecent) - 1 - if tc.snapshotInterval > 0 && snHeight < pruningHeightExpected { - pruningHeightExpected = snHeight - } - require.Equal(t, pruningHeightExpected, pruningHeightActual, curHeightStr) - } else { - require.Equal(t, int64(0), pruningHeightActual, curHeightStr) - } - } - } - }) - } -} - -func TestPruningHeight_Inputs(t *testing.T) { - keepRecent := int64(types.NewPruningOptions(types.PruningEverything).KeepRecent) - interval := int64(types.NewPruningOptions(types.PruningEverything).Interval) - - testcases := map[string]struct { - height int64 - expectedResult int64 - strategy types.PruningStrategy - }{ - "currentHeight is negative - prune everything - invalid currentHeight": { - -1, - 0, - types.PruningEverything, - }, - "currentHeight is zero - prune everything - invalid currentHeight": { - 0, - 0, - types.PruningEverything, - }, - "currentHeight is positive but within keep recent- prune everything - not kept": { - keepRecent, - 0, - types.PruningEverything, - }, - "currentHeight is positive and equal to keep recent+1 - no kept": { - keepRecent + 1, - 0, - types.PruningEverything, - }, - "currentHeight is positive and greater than keep recent+1 but not multiple of interval - no kept": { - keepRecent + 2, - 0, - types.PruningEverything, - }, - "currentHeight is positive and greater than keep recent+1 and multiple of interval - kept": { - interval, - interval - keepRecent - 1, - types.PruningEverything, - }, - "pruning nothing, currentHeight is positive and greater than keep recent - not kept": { - interval, - 0, - types.PruningNothing, - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - 
require.NotNil(t, manager) - manager.SetOptions(types.NewPruningOptions(tc.strategy)) - - pruningHeightActual := manager.GetPruningHeight(tc.height) - require.Equal(t, tc.expectedResult, pruningHeightActual) - }) - } -} - -func TestHandleSnapshotHeight_DbErr_Panic(t *testing.T) { - ctrl := gomock.NewController(t) - - // Setup - dbMock := mock.NewMockDB(ctrl) - - dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - - manager := pruning.NewManager(dbMock, log.NewNopLogger()) - manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) - require.NotNil(t, manager) - - defer func() { - if r := recover(); r == nil { - t.Fail() - } - }() - - manager.HandleSnapshotHeight(10) -} - -func TestHandleSnapshotHeight_LoadFromDisk(t *testing.T) { - snapshotInterval := uint64(10) - - // Setup - db := db.NewMemDB() - manager := pruning.NewManager(db, log.NewNopLogger()) - require.NotNil(t, manager) - - manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) - manager.SetSnapshotInterval(snapshotInterval) - - expected := 0 - for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ { - snapshotHeightStr := fmt.Sprintf("snaphost height: %d", snapshotHeight) - if snapshotHeight > int64(snapshotInterval) && snapshotHeight%int64(snapshotInterval) == 1 { - // Test flush - manager.HandleSnapshotHeight(snapshotHeight - 1) - expected = 1 - } - - loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db) - require.NoError(t, err) - require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr) - - // Test load back - err = manager.LoadSnapshotHeights(db) - require.NoError(t, err) - - loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db) - require.NoError(t, err) - require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr) - } -} - -func TestLoadPruningSnapshotHeights(t *testing.T) { - var ( - manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - err error - ) - require.NotNil(t, manager) - - // must not be PruningNothing - manager.SetOptions(types.NewPruningOptions(types.PruningDefault)) - - testcases := map[string]struct { - getFlushedPruningSnapshotHeights func() []int64 - expectedResult error - }{ - "negative snapshotPruningHeight - error": { - getFlushedPruningSnapshotHeights: func() []int64 { - return []int64{5, -2, 3} - }, - expectedResult: &pruning.NegativeHeightsError{Height: -2}, - }, - "non-negative - success": { - getFlushedPruningSnapshotHeights: func() []int64 { - return []int64{5, 0, 3} - }, - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - db := db.NewMemDB() - - if tc.getFlushedPruningSnapshotHeights != nil { - err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.Int64SliceToBytes(tc.getFlushedPruningSnapshotHeights())) - require.NoError(t, err) - } - - err = manager.LoadSnapshotHeights(db) - require.Equal(t, tc.expectedResult, err) - }) - } -} - -func TestLoadSnapshotHeights_PruneNothing(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - - manager.SetOptions(types.NewPruningOptions(types.PruningNothing)) - - require.Nil(t, manager.LoadSnapshotHeights(db.NewMemDB())) -} diff --git a/store/pruning/types/options.go b/store/pruning/types/options.go deleted file mode 100644 index 229dbed984..0000000000 --- a/store/pruning/types/options.go +++ /dev/null @@ -1,130 +0,0 @@ -package types - -import ( - "errors" - "fmt" -) - -// PruningOptions defines the pruning strategy used 
when determining which -// heights are removed from disk when committing state. -type PruningOptions struct { - // KeepRecent defines how many recent heights to keep on disk. - KeepRecent uint64 - - // Interval defines when the pruned heights are removed from disk. - Interval uint64 - - // Strategy defines the kind of pruning strategy. See below for more information on each. - Strategy PruningStrategy -} - -type PruningStrategy int - -// Pruning option string constants -const ( - PruningOptionDefault = "default" - PruningOptionEverything = "everything" - PruningOptionNothing = "nothing" - PruningOptionCustom = "custom" -) - -const ( - // PruningDefault defines a pruning strategy where the last 362880 heights are - // kept where to-be pruned heights are pruned at every 10th height. - // The last 362880 heights are kept(approximately 3.5 weeks worth of state) assuming the typical - // block time is 6s. If these values do not match the applications' requirements, use the "custom" option. - PruningDefault PruningStrategy = iota - // PruningEverything defines a pruning strategy where all committed heights are - // deleted, storing only the current height and last 2 states. To-be pruned heights are - // pruned at every 10th height. - PruningEverything - // PruningNothing defines a pruning strategy where all heights are kept on disk. - // This is the only stretegy where KeepEvery=1 is allowed with state-sync snapshots disabled. - PruningNothing - // PruningCustom defines a pruning strategy where the user specifies the pruning. - PruningCustom - // PruningUndefined defines an undefined pruning strategy. It is to be returned by stores that do not support pruning. - PruningUndefined -) - -const ( - pruneEverythingKeepRecent = 2 - pruneEverythingInterval = 10 -) - -var ( - ErrPruningIntervalZero = errors.New("'pruning-interval' must not be 0. If you want to disable pruning, select pruning = \"nothing\"") - ErrPruningIntervalTooSmall = fmt.Errorf("'pruning-interval' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingInterval) - ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. 
For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent) -) - -func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions { - switch pruningStrategy { - case PruningDefault: - return PruningOptions{ - KeepRecent: 362880, - Interval: 10, - Strategy: PruningDefault, - } - case PruningEverything: - return PruningOptions{ - KeepRecent: pruneEverythingKeepRecent, - Interval: pruneEverythingInterval, - Strategy: PruningEverything, - } - case PruningNothing: - return PruningOptions{ - KeepRecent: 0, - Interval: 0, - Strategy: PruningNothing, - } - default: - return PruningOptions{ - Strategy: PruningCustom, - } - } -} - -func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions { - return PruningOptions{ - KeepRecent: keepRecent, - Interval: interval, - Strategy: PruningCustom, - } -} - -func (po PruningOptions) GetPruningStrategy() PruningStrategy { - return po.Strategy -} - -func (po PruningOptions) Validate() error { - if po.Strategy == PruningNothing { - return nil - } - if po.Interval == 0 { - return ErrPruningIntervalZero - } - if po.Interval < pruneEverythingInterval { - return ErrPruningIntervalTooSmall - } - if po.KeepRecent < pruneEverythingKeepRecent { - return ErrPruningKeepRecentTooSmall - } - return nil -} - -func NewPruningOptionsFromString(strategy string) PruningOptions { - switch strategy { - case PruningOptionEverything: - return NewPruningOptions(PruningEverything) - - case PruningOptionNothing: - return NewPruningOptions(PruningNothing) - - case PruningOptionDefault: - return NewPruningOptions(PruningDefault) - - default: - return NewPruningOptions(PruningDefault) - } -} diff --git a/store/pruning/types/options_test.go b/store/pruning/types/options_test.go deleted file mode 100644 index abc6bf39e2..0000000000 --- a/store/pruning/types/options_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestPruningOptions_Validate(t *testing.T) { - testCases := []struct { - opts PruningOptions - expectErr error - }{ - {NewPruningOptions(PruningDefault), nil}, - {NewPruningOptions(PruningEverything), nil}, - {NewPruningOptions(PruningNothing), nil}, - {NewPruningOptions(PruningCustom), ErrPruningIntervalZero}, - {NewCustomPruningOptions(2, 10), nil}, - {NewCustomPruningOptions(100, 15), nil}, - {NewCustomPruningOptions(1, 10), ErrPruningKeepRecentTooSmall}, - {NewCustomPruningOptions(2, 9), ErrPruningIntervalTooSmall}, - {NewCustomPruningOptions(2, 0), ErrPruningIntervalZero}, - {NewCustomPruningOptions(2, 0), ErrPruningIntervalZero}, - } - - for _, tc := range testCases { - err := tc.opts.Validate() - require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err) - } -} - -func TestPruningOptions_GetStrategy(t *testing.T) { - testCases := []struct { - opts PruningOptions - expectedStrategy PruningStrategy - }{ - {NewPruningOptions(PruningDefault), PruningDefault}, - {NewPruningOptions(PruningEverything), PruningEverything}, - {NewPruningOptions(PruningNothing), PruningNothing}, - {NewPruningOptions(PruningCustom), PruningCustom}, - {NewCustomPruningOptions(2, 10), PruningCustom}, - } - - for _, tc := range testCases { - actualStrategy := tc.opts.GetPruningStrategy() - require.Equal(t, tc.expectedStrategy, actualStrategy) - } -} - -func TestNewPruningOptionsFromString(t *testing.T) { - testCases := []struct { - optString string - expect PruningOptions - }{ - {PruningOptionDefault, NewPruningOptions(PruningDefault)}, - 
{PruningOptionEverything, NewPruningOptions(PruningEverything)}, - {PruningOptionNothing, NewPruningOptions(PruningNothing)}, - {"invalid", NewPruningOptions(PruningDefault)}, - } - - for _, tc := range testCases { - actual := NewPruningOptionsFromString(tc.optString) - require.Equal(t, tc.expect, actual) - } -} diff --git a/store/reexport.go b/store/reexport.go deleted file mode 100644 index 9865cb9b03..0000000000 --- a/store/reexport.go +++ /dev/null @@ -1,29 +0,0 @@ -package store - -import ( - "cosmossdk.io/store/types" -) - -// Import cosmos-sdk/types/store.go for convenience. -type ( - Store = types.Store - Committer = types.Committer - CommitStore = types.CommitStore - MultiStore = types.MultiStore - CacheMultiStore = types.CacheMultiStore - CommitMultiStore = types.CommitMultiStore - KVStore = types.KVStore - Iterator = types.Iterator - CacheKVStore = types.CacheKVStore - CommitKVStore = types.CommitKVStore - CacheWrapper = types.CacheWrapper - CacheWrap = types.CacheWrap - CommitID = types.CommitID - Key = types.StoreKey - Type = types.StoreType - Queryable = types.Queryable - TraceContext = types.TraceContext - Gas = types.Gas - GasMeter = types.GasMeter - GasConfig = types.GasConfig -) diff --git a/store/root/store.go b/store/root/store.go new file mode 100644 index 0000000000..abd30191fd --- /dev/null +++ b/store/root/store.go @@ -0,0 +1,364 @@ +package root + +import ( + "bytes" + "fmt" + "io" + "slices" + + "github.com/cockroachdb/errors" + ics23 "github.com/cosmos/ics23/go" + + "cosmossdk.io/log" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/branchkv" + "cosmossdk.io/store/v2/commitment" + "cosmossdk.io/store/v2/tracekv" +) + +// defaultStoreKey defines the default store key used for the single SC backend. +// Note, however, this store key is essentially irrelevant as it's not exposed +// to the user and it only needed to fulfill usage of StoreInfo during Commit. +const defaultStoreKey = "default" + +var _ store.RootStore = (*Store)(nil) + +// Store defines the SDK's default RootStore implementation. It contains a single +// State Storage (SS) backend and a single State Commitment (SC) backend. Note, +// this means all store keys are ignored and commitments exist in a single commitment +// tree. +type Store struct { + logger log.Logger + initialVersion uint64 + + // stateStore reflects the state storage backend + stateStore store.VersionedDatabase + + // stateCommitment reflects the state commitment (SC) backend + stateCommitment *commitment.Database + + // rootKVStore reflects the root BranchedKVStore that is used to accumulate writes + // and branch off of. 
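+	// Writes accumulated here are collected into a single changeset that is
+	// flushed to both the SS and SC backends upon Commit().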
+ rootKVStore store.BranchedKVStore + + // commitHeader reflects the header used when committing state (note, this isn't required and only used for query purposes) + commitHeader store.CommitHeader + + // lastCommitInfo reflects the last version/hash that has been committed + lastCommitInfo *store.CommitInfo + + // workingHash defines the current (yet to be committed) hash + workingHash []byte + + // traceWriter defines a writer for store tracing operation + traceWriter io.Writer + + // traceContext defines the tracing context, if any, for trace operations + traceContext store.TraceContext +} + +func New( + logger log.Logger, + initVersion uint64, + ss store.VersionedDatabase, + sc *commitment.Database, +) (store.RootStore, error) { + rootKVStore, err := branchkv.New(defaultStoreKey, ss) + if err != nil { + return nil, err + } + + return &Store{ + logger: logger.With("module", "root_store"), + initialVersion: initVersion, + stateStore: ss, + stateCommitment: sc, + rootKVStore: rootKVStore, + }, nil +} + +// Close closes the store and resets all internal fields. Note, Close() is NOT +// idempotent and should only be called once. +func (s *Store) Close() (err error) { + err = errors.Join(err, s.stateStore.Close()) + err = errors.Join(err, s.stateCommitment.Close()) + + s.stateStore = nil + s.stateCommitment = nil + s.lastCommitInfo = nil + s.commitHeader = nil + + return err +} + +// MountSCStore performs a no-op as a SC backend must be provided at initialization. +func (s *Store) MountSCStore(_ string, _ store.Tree) error { + return errors.New("cannot mount SC store; SC must be provided on initialization") +} + +// GetSCStore returns the store's state commitment (SC) backend. Note, the store +// key is ignored as there exists only a single SC tree. +func (s *Store) GetSCStore(_ string) store.Tree { + return s.stateCommitment +} + +func (s *Store) LoadLatestVersion() error { + lv, err := s.GetLatestVersion() + if err != nil { + return err + } + + return s.loadVersion(lv, nil) +} + +// LastCommitID returns a CommitID based off of the latest internal CommitInfo. +// If an internal CommitInfo is not set, a new one will be returned with only the +// latest version set, which is based off of the SS view. +func (s *Store) LastCommitID() (store.CommitID, error) { + if s.lastCommitInfo != nil { + return s.lastCommitInfo.CommitID(), nil + } + + // XXX/TODO: We cannot use SS to get the latest version when lastCommitInfo + // is nil if SS is flushed asynchronously. This is because the latest version + // in SS might not be the latest version in the SC stores. + // + // Ref: https://github.com/cosmos/cosmos-sdk/issues/17314 + latestVersion, err := s.stateStore.GetLatestVersion() + if err != nil { + return store.CommitID{}, err + } + + // sanity check: ensure integrity of latest version against SC + scVersion := s.stateCommitment.GetLatestVersion() + if scVersion != latestVersion { + return store.CommitID{}, fmt.Errorf("SC and SS version mismatch; got: %d, expected: %d", scVersion, latestVersion) + } + + return store.CommitID{Version: latestVersion}, nil +} + +// GetLatestVersion returns the latest version based on the latest internal +// CommitInfo. An error is returned if the latest CommitInfo or version cannot +// be retrieved. +func (s *Store) GetLatestVersion() (uint64, error) { + lastCommitID, err := s.LastCommitID() + if err != nil { + return 0, err + } + + return lastCommitID.Version, nil +} + +// GetProof delegates the GetProof to the store's underlying SC backend. 
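+//
+// Illustrative usage (a sketch only; error handling elided and proof
+// verification depends on the caller's ics23 proof spec, which is not part of
+// this API):
+//
+//	proof, err := rs.GetProof("", version, []byte("key"))
+//	if err != nil {
+//		// the version has not been committed yet or the key cannot be proven
+//	}
+//	// proof.GetExist() (or proof.GetNonexist()) can then be verified against
+//	// the commitment root for that version.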
+func (s *Store) GetProof(_ string, version uint64, key []byte) (*ics23.CommitmentProof, error) {
+	return s.stateCommitment.GetProof(version, key)
+}
+
+// LoadVersion loads a specific version, returning an error upon failure.
+func (s *Store) LoadVersion(v uint64) (err error) {
+	return s.loadVersion(v, nil)
+}
+
+// GetKVStore returns the store's root KVStore. Any writes to this store without
+// branching will be committed to SC and SS upon Commit(). Branching will create
+// a branched KVStore that allows writes to be discarded or propagated to the
+// root KVStore using Write().
+func (s *Store) GetKVStore(_ string) store.KVStore {
+	if s.TracingEnabled() {
+		return tracekv.New(s.rootKVStore, s.traceWriter, s.traceContext)
+	}
+
+	return s.rootKVStore
+}
+
+func (s *Store) GetBranchedKVStore(_ string) store.BranchedKVStore {
+	if s.TracingEnabled() {
+		return tracekv.New(s.rootKVStore, s.traceWriter, s.traceContext)
+	}
+
+	return s.rootKVStore
+}
+
+func (s *Store) loadVersion(v uint64, upgrades any) error {
+	s.logger.Debug("loading version", "version", v)
+
+	if err := s.stateCommitment.LoadVersion(v); err != nil {
+		return fmt.Errorf("failed to load SC version %d: %w", v, err)
+	}
+
+	// TODO: Complete this method to handle upgrades. See legacy RMS loadVersion()
+	// for reference.
+	//
+	// Ref: https://github.com/cosmos/cosmos-sdk/issues/17314
+
+	return nil
+}
+
+func (s *Store) SetTracingContext(tc store.TraceContext) {
+	s.traceContext = tc
+}
+
+func (s *Store) SetTracer(w io.Writer) {
+	s.traceWriter = w
+}
+
+func (s *Store) TracingEnabled() bool {
+	return s.traceWriter != nil
+}
+
+func (s *Store) SetCommitHeader(h store.CommitHeader) {
+	s.commitHeader = h
+}
+
+// Branch returns a copy of the Store with a branched underlying root KVStore.
+// Any call to GetKVStore and GetBranchedKVStore returns the branched KVStore.
+func (s *Store) Branch() store.BranchedRootStore {
+	branch := s.rootKVStore.Branch()
+
+	return &Store{
+		logger:          s.logger,
+		initialVersion:  s.initialVersion,
+		stateStore:      s.stateStore,
+		stateCommitment: s.stateCommitment,
+		rootKVStore:     branch,
+		commitHeader:    s.commitHeader,
+		lastCommitInfo:  s.lastCommitInfo,
+		traceWriter:     s.traceWriter,
+		traceContext:    s.traceContext,
+	}
+}
+
+// WorkingHash returns the working hash of the root store. Note, WorkingHash()
+// should only be called once per block, once all writes are complete and prior
+// to Commit() being called.
+//
+// If the working hash is nil, we compute and set it on the root store by
+// constructing a CommitInfo object, which in turn creates and writes a batch
+// of the current changeset to the SC tree.
+func (s *Store) WorkingHash() ([]byte, error) {
+	if s.workingHash == nil {
+		if err := s.writeSC(); err != nil {
+			return nil, err
+		}
+
+		s.workingHash = s.lastCommitInfo.Hash()
+	}
+
+	return slices.Clone(s.workingHash), nil
+}
+
+func (s *Store) Write() {
+	s.rootKVStore.Write()
+}
+
+// Commit commits all state changes to the underlying SS and SC backends. Note,
+// at the time of Commit(), we expect WorkingHash() to have already been called,
+// which internally sets the working hash (by writing a batch of the changeset
+// to the SC tree) and the CommitInfo on the root store. The changeset is
+// retrieved from the rootKVStore and represents the entire set of writes to be
+// committed. The same changeset is used to flush writes to the SS backend.
+//
+// Note, Commit() commits SS and SC synchronously.
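+//
+// A typical per-block flow looks roughly as follows (an illustrative sketch
+// based on the tests in store_test.go; error handling elided):
+//
+//	kv := rs.GetKVStore("")
+//	kv.Set([]byte("foo"), []byte("bar")) // accumulate writes for the block
+//	workingHash, _ := rs.WorkingHash()   // stage the changeset and compute the app hash
+//	commitHash, _ := rs.Commit()         // flush the same changeset to SS and SC
+//	// workingHash and commitHash are expected to be equal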
+// Commit commits all state changes to the underlying SS and SC backends. Note,
+// at the time of Commit(), we expect WorkingHash() to have already been called,
+// which internally sets the working hash (computed by writing a batch of the
+// changeset to the SC tree) and the CommitInfo on the root store. The changeset
+// is retrieved from the rootKVStore and represents the entire set of writes to
+// be committed. The same changeset is used to flush writes to the SS backend.
+//
+// Note, Commit() commits SS and SC synchronously.
+func (s *Store) Commit() ([]byte, error) {
+	if s.workingHash == nil {
+		return nil, fmt.Errorf("working hash is nil; must call WorkingHash() before Commit()")
+	}
+
+	version := s.lastCommitInfo.Version
+
+	if s.commitHeader != nil && s.commitHeader.GetHeight() != version {
+		s.logger.Debug("commit header and version mismatch", "header_height", s.commitHeader.GetHeight(), "version", version)
+	}
+
+	changeset := s.rootKVStore.GetChangeset()
+
+	// commit SS
+	if err := s.stateStore.ApplyChangeset(version, changeset); err != nil {
+		return nil, fmt.Errorf("failed to commit SS: %w", err)
+	}
+
+	// commit SC
+	if err := s.commitSC(); err != nil {
+		return nil, fmt.Errorf("failed to commit SC stores: %w", err)
+	}
+
+	if s.commitHeader != nil {
+		s.lastCommitInfo.Timestamp = s.commitHeader.GetTime()
+	}
+
+	if err := s.rootKVStore.Reset(); err != nil {
+		return nil, fmt.Errorf("failed to reset root KVStore: %w", err)
+	}
+
+	s.workingHash = nil
+
+	return s.lastCommitInfo.Hash(), nil
+}
+
+// writeSC gets the current changeset from the rootKVStore and writes it as a
+// batch to the underlying SC tree, which allows us to retrieve the working hash
+// of the SC tree. Finally, we construct a *CommitInfo and set it as the store's
+// lastCommitInfo. Note, this should only be called once per block!
+func (s *Store) writeSC() error {
+	changeSet := s.rootKVStore.GetChangeset()
+
+	if err := s.stateCommitment.WriteBatch(changeSet); err != nil {
+		return fmt.Errorf("failed to write batch to SC store: %w", err)
+	}
+
+	var previousHeight, version uint64
+	if s.lastCommitInfo.GetVersion() == 0 && s.initialVersion > 1 {
+		// This case means that no commit has been made in the store, so we start
+		// from initialVersion.
+		version = s.initialVersion
+	} else {
+		// This case can mean two things:
+		//
+		// 1. There was already a previous commit in the store, in which case we
+		// increment the version from there.
+		// 2. There was no previous commit, and initial version was not set, in which
+		// case we start at version 1.
+		previousHeight = s.lastCommitInfo.GetVersion()
+		version = previousHeight + 1
+	}
+
+	workingHash := s.stateCommitment.WorkingHash()
+
+	s.lastCommitInfo = &store.CommitInfo{
+		Version: version,
+		StoreInfos: []store.StoreInfo{
+			{
+				Name: defaultStoreKey,
+				CommitID: store.CommitID{
+					Version: version,
+					Hash:    workingHash,
+				},
+			},
+		},
+	}
+
+	return nil
+}
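Editorial note (not part of the patch): the WorkingHash()/Commit() ordering required by the doc comments above can be sketched as follows. It relies only on the store.RootStore interface used in this diff; the concrete construction would mirror SetupTest in the test file added below. The package and function names are illustrative.

```go
package example

import (
	"fmt"

	store "cosmossdk.io/store/v2"
)

// commitBlock shows the intended per-block sequence: stage writes, call
// WorkingHash() exactly once after all writes, then Commit(). Per commitSC()
// below, the returned commit hash must equal the working hash.
func commitBlock(rs store.RootStore) error {
	kv := rs.GetKVStore("")
	kv.Set([]byte("key"), []byte("value")) // writes staged for this block

	wHash, err := rs.WorkingHash() // app hash for the block; must precede Commit()
	if err != nil {
		return err
	}

	cHash, err := rs.Commit() // flushes the same changeset to SS and SC
	if err != nil {
		return err
	}

	fmt.Printf("working hash %X == commit hash %X\n", wHash, cHash)
	return nil
}
```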
+// commitSC commits the SC store. At this point, a batch of the current changeset
+// should have already been written to the SC via WorkingHash(). This method
+// solely commits that batch. An error is returned if the commit fails or if the
+// resulting commit hash is not equivalent to the working hash.
+func (s *Store) commitSC() error {
+	commitBz, err := s.stateCommitment.Commit()
+	if err != nil {
+		return fmt.Errorf("failed to commit SC store: %w", err)
+	}
+
+	workingHash, err := s.WorkingHash()
+	if err != nil {
+		return fmt.Errorf("failed to get working hash: %w", err)
+	}
+
+	if !bytes.Equal(commitBz, workingHash) {
+		return fmt.Errorf("unexpected commit hash; got: %X, expected: %X", commitBz, workingHash)
+	}
+
+	return nil
+}
diff --git a/store/root/store_test.go b/store/root/store_test.go
new file mode 100644
index 0000000000..67d0852fd8
--- /dev/null
+++ b/store/root/store_test.go
@@ -0,0 +1,220 @@
+package root
+
+import (
+	"fmt"
+	"io"
+	"testing"
+
+	dbm "github.com/cosmos/cosmos-db"
+	"github.com/stretchr/testify/suite"
+
+	"cosmossdk.io/log"
+	"cosmossdk.io/store/v2"
+	"cosmossdk.io/store/v2/commitment"
+	"cosmossdk.io/store/v2/commitment/iavl"
+	"cosmossdk.io/store/v2/storage/sqlite"
+)
+
+type RootStoreTestSuite struct {
+	suite.Suite
+
+	rootStore store.RootStore
+}
+
+func TestRootStoreTestSuite(t *testing.T) {
+	suite.Run(t, &RootStoreTestSuite{})
+}
+
+func (s *RootStoreTestSuite) SetupTest() {
+	noopLog := log.NewNopLogger()
+
+	ss, err := sqlite.New(s.T().TempDir())
+	s.Require().NoError(err)
+
+	tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig())
+	sc := commitment.NewDatabase(tree)
+
+	rs, err := New(noopLog, 1, ss, sc)
+	s.Require().NoError(err)
+
+	rs.SetTracer(io.Discard)
+	rs.SetTracingContext(store.TraceContext{
+		"test": s.T().Name(),
+	})
+
+	s.rootStore = rs
+}
+
+func (s *RootStoreTestSuite) TearDownTest() {
+	err := s.rootStore.Close()
+	s.Require().NoError(err)
+}
+
+func (s *RootStoreTestSuite) TestMountSCStore() {
+	s.Require().Error(s.rootStore.MountSCStore("", nil))
+}
+
+func (s *RootStoreTestSuite) TestGetSCStore() {
+	s.Require().Equal(s.rootStore.GetSCStore(""), s.rootStore.(*Store).stateCommitment)
+}
+
+func (s *RootStoreTestSuite) TestGetKVStore() {
+	kvs := s.rootStore.GetKVStore("")
+	s.Require().NotNil(kvs)
+}
+
+func (s *RootStoreTestSuite) TestGetBranchedKVStore() {
+	bs := s.rootStore.GetBranchedKVStore("")
+	s.Require().NotNil(bs)
+	s.Require().Empty(bs.GetChangeset().Pairs)
+}
+
+func (s *RootStoreTestSuite) TestGetProof() {
+	p, err := s.rootStore.GetProof("", 1, []byte("foo"))
+	s.Require().Error(err)
+	s.Require().Nil(p)
+
+	// write and commit a changeset
+	bs := s.rootStore.GetBranchedKVStore("")
+	bs.Set([]byte("foo"), []byte("bar"))
+
+	workingHash, err := s.rootStore.WorkingHash()
+	s.Require().NoError(err)
+	s.Require().NotNil(workingHash)
+
+	commitHash, err := s.rootStore.Commit()
+	s.Require().NoError(err)
+	s.Require().NotNil(commitHash)
+	s.Require().Equal(workingHash, commitHash)
+
+	// ensure the proof is non-nil for the corresponding version
+	p, err = s.rootStore.GetProof("", 1, []byte("foo"))
+	s.Require().NoError(err)
+	s.Require().NotNil(p)
+	s.Require().Equal([]byte("foo"), p.GetExist().Key)
+	s.Require().Equal([]byte("bar"), p.GetExist().Value)
+}
+
+func (s *RootStoreTestSuite) TestBranch() {
+	// write and commit a changeset
+	bs := s.rootStore.GetKVStore("")
+	bs.Set([]byte("foo"), []byte("bar"))
+
+	workingHash, err := s.rootStore.WorkingHash()
+	s.Require().NoError(err)
+	s.Require().NotNil(workingHash)
+
+	commitHash, err := s.rootStore.Commit()
+	s.Require().NoError(err)
+	s.Require().NotNil(commitHash)
+	s.Require().Equal(workingHash, commitHash)
+
+	// branch the root store
+	rs2 := s.rootStore.Branch()
+
+	// ensure we can perform reads which pass through to the original root store
+ bs2 := rs2.GetKVStore("") + s.Require().Equal([]byte("bar"), bs2.Get([]byte("foo"))) + + // make a change to the branched root store + bs2.Set([]byte("foo"), []byte("updated_bar")) + + // ensure the original root store is not modified + s.Require().Equal([]byte("bar"), bs.Get([]byte("foo"))) + + // write changes + rs2.Write() + + // ensure changes are reflected in the original root store + s.Require().Equal([]byte("updated_bar"), bs.Get([]byte("foo"))) +} + +func (s *RootStoreTestSuite) TestMultiBranch() { + // write and commit a changeset + bs := s.rootStore.GetKVStore("") + bs.Set([]byte("foo"), []byte("bar")) + + workingHash, err := s.rootStore.WorkingHash() + s.Require().NoError(err) + s.Require().NotNil(workingHash) + + commitHash, err := s.rootStore.Commit() + s.Require().NoError(err) + s.Require().NotNil(commitHash) + s.Require().Equal(workingHash, commitHash) + + // create multiple branches of the root store + var branchedRootStores []store.BranchedRootStore + for i := 0; i < 5; i++ { + branchedRootStores = append(branchedRootStores, s.rootStore.Branch()) + } + + // get the last branched root store + rs2 := branchedRootStores[4] + + // ensure we can perform reads which pass through to the original root store + bs2 := rs2.GetKVStore("") + s.Require().Equal([]byte("bar"), bs2.Get([]byte("foo"))) + + // make a change to the branched root store + bs2.Set([]byte("foo"), []byte("updated_bar")) + + // ensure the original root store is not modified + s.Require().Equal([]byte("bar"), bs.Get([]byte("foo"))) + + // write changes + rs2.Write() + + // ensure changes are reflected in the original root store + s.Require().Equal([]byte("updated_bar"), bs.Get([]byte("foo"))) +} + +func (s *RootStoreTestSuite) TestCommit() { + lv, err := s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Zero(lv) + + // branch the root store + rs2 := s.rootStore.Branch() + + // perform changes + bs2 := rs2.GetKVStore("") + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + bs2.Set([]byte(key), []byte(val)) + } + + // write to the branched root store, which will flush to the parent root store + rs2.Write() + + // committing w/o calling WorkingHash should error + _, err = s.rootStore.Commit() + s.Require().Error(err) + + // execute WorkingHash and Commit + wHash, err := s.rootStore.WorkingHash() + s.Require().NoError(err) + + cHash, err := s.rootStore.Commit() + s.Require().NoError(err) + s.Require().Equal(wHash, cHash) + + // ensure latest version is updated + lv, err = s.rootStore.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(1), lv) + + // ensure the root KVStore is cleared + s.Require().Empty(s.rootStore.(*Store).rootKVStore.GetChangeset().Pairs) + + // perform reads on the updated root store + bs := s.rootStore.GetKVStore("") + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + s.Require().Equal([]byte(val), bs.Get([]byte(key))) + } +} diff --git a/store/rootmulti/dbadapter.go b/store/rootmulti/dbadapter.go deleted file mode 100644 index 65cd41c66a..0000000000 --- a/store/rootmulti/dbadapter.go +++ /dev/null @@ -1,49 +0,0 @@ -package rootmulti - -import ( - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -var commithash = []byte("FAKE_HASH") - -var ( - _ types.KVStore = 
(*commitDBStoreAdapter)(nil) - _ types.Committer = (*commitDBStoreAdapter)(nil) -) - -//---------------------------------------- -// commitDBStoreWrapper should only be used for simulation/debugging, -// as it doesn't compute any commit hash, and it cannot load older state. - -// Wrapper type for dbm.Db with implementation of KVStore -type commitDBStoreAdapter struct { - dbadapter.Store -} - -func (cdsa commitDBStoreAdapter) Commit() types.CommitID { - return types.CommitID{ - Version: -1, - Hash: commithash, - } -} - -func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID { - return types.CommitID{ - Version: -1, - Hash: commithash, - } -} - -func (cdsa commitDBStoreAdapter) WorkingHash() []byte { - return commithash -} - -func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. -// They must be set on the root commit multi-store. -func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} diff --git a/store/rootmulti/proof.go b/store/rootmulti/proof.go deleted file mode 100644 index 78217a1600..0000000000 --- a/store/rootmulti/proof.go +++ /dev/null @@ -1,27 +0,0 @@ -package rootmulti - -import ( - "github.com/cometbft/cometbft/crypto/merkle" - - storetypes "cosmossdk.io/store/types" -) - -// RequireProof returns whether proof is required for the subpath. -func RequireProof(subpath string) bool { - // XXX: create a better convention. - // Currently, only when query subpath is "/key", will proof be included in - // response. If there are some changes about proof building in iavlstore.go, - // we must change code here to keep consistency with iavlStore#Query. - return subpath == "/key" -} - -//----------------------------------------------------------------------------- - -// XXX: This should be managed by the rootMultiStore which may want to register -// more proof ops? -func DefaultProofRuntime() (prt *merkle.ProofRuntime) { - prt = merkle.NewProofRuntime() - prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder) - prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder) - return -} diff --git a/store/rootmulti/proof_test.go b/store/rootmulti/proof_test.go deleted file mode 100644 index d573937c3d..0000000000 --- a/store/rootmulti/proof_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package rootmulti - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -func TestVerifyIAVLStoreQueryProof(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - store := iStore.(*iavl.Store) - require.Nil(t, err) - store.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/key", // required path to get key/value+proof - Data: []byte("MYKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify (bad) proof. 
- err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY_NOT", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE_NOT")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte(nil)) - require.NotNil(t, err) -} - -func TestVerifyMultiStoreQueryProof(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") - - store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) - require.NoError(t, store.LoadVersion(0)) - - iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) - iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/iavlStoreKey/key", // required path to get key/value+proof - Data: []byte("MYKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY_NOT", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE_NOT")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte(nil)) - require.NotNil(t, err) -} - -func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") - - store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) - err := store.LoadVersion(0) - require.NoError(t, err) - - iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) - iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() // Commit with empty iavl store. - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/iavlStoreKey/key", // required path to get key/value+proof - Data: []byte("MYABSENTKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY") - require.Nil(t, err) - - // Verify (bad) proof. 
- prt = DefaultProofRuntime() - err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/MYABSENTKEY") - require.NotNil(t, err) - - // Verify (bad) proof. - prt = DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY", []byte("")) - require.NotNil(t, err) -} diff --git a/store/rootmulti/snapshot_test.go b/store/rootmulti/snapshot_test.go deleted file mode 100644 index 635be92970..0000000000 --- a/store/rootmulti/snapshot_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package rootmulti_test - -import ( - "crypto/sha256" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - "math/rand" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/rootmulti" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/types" -) - -func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *rootmulti.Store { - multiStore := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests - - keys := []*types.KVStoreKey{} - for i := uint8(0); i < stores; i++ { - key := types.NewKVStoreKey(fmt.Sprintf("store%v", i)) - multiStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - keys = append(keys, key) - } - err := multiStore.LoadLatestVersion() - if err != nil { - panic(err) - } - - for _, key := range keys { - store := multiStore.GetCommitKVStore(key).(*iavl.Store) - for i := uint64(0); i < storeKeys; i++ { - k := make([]byte, 8) - v := make([]byte, 1024) - binary.BigEndian.PutUint64(k, i) - _, err := r.Read(v) - if err != nil { - panic(err) - } - store.Set(k, v) - } - } - - multiStore.Commit() - err = multiStore.LoadLatestVersion() - if err != nil { - panic(err) - } - - return multiStore -} - -func newMultiStoreWithMixedMounts(db dbm.DB) *rootmulti.Store { - store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl3"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) - if err := store.LoadLatestVersion(); err != nil { - panic(err) - } - return store -} - -func newMultiStoreWithMixedMountsAndBasicData(db dbm.DB) *rootmulti.Store { - store := newMultiStoreWithMixedMounts(db) - store1 := store.GetStoreByName("iavl1").(types.CommitKVStore) - store2 := store.GetStoreByName("iavl2").(types.CommitKVStore) - trans1 := store.GetStoreByName("trans1").(types.KVStore) - - store1.Set([]byte("a"), []byte{1}) - store1.Set([]byte("b"), []byte{1}) - store2.Set([]byte("X"), []byte{255}) - store2.Set([]byte("A"), []byte{101}) - trans1.Set([]byte("x1"), []byte{91}) - store.Commit() - - store1.Set([]byte("b"), []byte{2}) - store1.Set([]byte("c"), []byte{3}) - store2.Set([]byte("B"), []byte{102}) - store.Commit() - - store2.Set([]byte("C"), []byte{103}) - store2.Delete([]byte("X")) - trans1.Set([]byte("x2"), []byte{92}) - store.Commit() - - return store -} - -func assertStoresEqual(t *testing.T, expect, actual types.CommitKVStore, msgAndArgs ...interface{}) { - t.Helper() - assert.Equal(t, expect.LastCommitID(), actual.LastCommitID()) - expectIter := 
expect.Iterator(nil, nil) - expectMap := map[string][]byte{} - for ; expectIter.Valid(); expectIter.Next() { - expectMap[string(expectIter.Key())] = expectIter.Value() - } - require.NoError(t, expectIter.Error()) - - actualIter := expect.Iterator(nil, nil) - actualMap := map[string][]byte{} - for ; actualIter.Valid(); actualIter.Next() { - actualMap[string(actualIter.Key())] = actualIter.Value() - } - require.NoError(t, actualIter.Error()) - - assert.Equal(t, expectMap, actualMap, msgAndArgs...) -} - -func TestMultistoreSnapshot_Checksum(t *testing.T) { - // Chunks from different nodes must fit together, so all nodes must produce identical chunks. - // This checksum test makes sure that the byte stream remains identical. If the test fails - // without having changed the data (e.g. because the Protobuf or zlib encoding changes), - // snapshottypes.CurrentFormat must be bumped. - store := newMultiStoreWithGeneratedData(dbm.NewMemDB(), 5, 10000) - version := uint64(store.LastCommitID().Version) - - testcases := []struct { - format uint32 - chunkHashes []string - }{ - {1, []string{ - "503e5b51b657055b77e88169fadae543619368744ad15f1de0736c0a20482f24", - "e1a0daaa738eeb43e778aefd2805e3dd720798288a410b06da4b8459c4d8f72e", - "aa048b4ee0f484965d7b3b06822cf0772cdcaad02f3b1b9055e69f2cb365ef3c", - "7921eaa3ed4921341e504d9308a9877986a879fe216a099c86e8db66fcba4c63", - "a4a864e6c02c9fca5837ec80dc84f650b25276ed7e4820cf7516ced9f9901b86", - "980925390cc50f14998ecb1e87de719ca9dd7e72f5fefbe445397bf670f36c31", - }}, - } - for _, tc := range testcases { - tc := tc - t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) { - ch := make(chan io.ReadCloser) - go func() { - streamWriter := snapshots.NewStreamWriter(ch) - defer streamWriter.Close() - require.NotNil(t, streamWriter) - err := store.Snapshot(version, streamWriter) - require.NoError(t, err) - }() - hashes := []string{} - hasher := sha256.New() - for chunk := range ch { - hasher.Reset() - _, err := io.Copy(hasher, chunk) - require.NoError(t, err) - hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil))) - } - assert.Equal(t, tc.chunkHashes, hashes, - "Snapshot output for format %v has changed", tc.format) - }) - } -} - -func TestMultistoreSnapshot_Errors(t *testing.T) { - store := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB()) - - testcases := map[string]struct { - height uint64 - expectType error - }{ - "0 height": {0, nil}, - "unknown height": {9, nil}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - err := store.Snapshot(tc.height, nil) - require.Error(t, err) - if tc.expectType != nil { - assert.True(t, errors.Is(err, tc.expectType)) - } - }) - } -} - -func TestMultistoreSnapshotRestore(t *testing.T) { - source := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB()) - target := newMultiStoreWithMixedMounts(dbm.NewMemDB()) - version := uint64(source.LastCommitID().Version) - require.EqualValues(t, 3, version) - dummyExtensionItem := snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_Extension{ - Extension: &snapshottypes.SnapshotExtensionMeta{ - Name: "test", - Format: 1, - }, - }, - } - - chunks := make(chan io.ReadCloser, 100) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - require.NotNil(t, streamWriter) - defer streamWriter.Close() - err := source.Snapshot(version, streamWriter) - require.NoError(t, err) - // write an extension metadata - err = streamWriter.WriteMsg(&dummyExtensionItem) - require.NoError(t, err) - }() - - streamReader, err := 
snapshots.NewStreamReader(chunks) - require.NoError(t, err) - nextItem, err := target.Restore(version, snapshottypes.CurrentFormat, streamReader) - require.NoError(t, err) - require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - - assert.Equal(t, source.LastCommitID(), target.LastCommitID()) - for _, key := range source.StoreKeysByName() { - sourceStore := source.GetStoreByName(key.Name()).(types.CommitKVStore) - targetStore := target.GetStoreByName(key.Name()).(types.CommitKVStore) - switch sourceStore.GetStoreType() { - case types.StoreTypeTransient: - assert.False(t, targetStore.Iterator(nil, nil).Valid(), - "transient store %v not empty", key.Name()) - default: - assertStoresEqual(t, sourceStore, targetStore, "store %q not equal", key.Name()) - } - } -} - -func benchmarkMultistoreSnapshot(b *testing.B, stores uint8, storeKeys uint64) { - b.Helper() - b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") - - b.ReportAllocs() - b.StopTimer() - source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) - version := source.LastCommitID().Version - require.EqualValues(b, 1, version) - b.StartTimer() - - for i := 0; i < b.N; i++ { - target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) - for _, key := range source.StoreKeysByName() { - target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - } - err := target.LoadLatestVersion() - require.NoError(b, err) - require.EqualValues(b, 0, target.LastCommitID().Version) - - chunks := make(chan io.ReadCloser) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - require.NotNil(b, streamWriter) - err := source.Snapshot(uint64(version), streamWriter) - require.NoError(b, err) - }() - for reader := range chunks { - _, err := io.Copy(io.Discard, reader) - require.NoError(b, err) - err = reader.Close() - require.NoError(b, err) - } - } -} - -func benchmarkMultistoreSnapshotRestore(b *testing.B, stores uint8, storeKeys uint64) { - b.Helper() - b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") - - b.ReportAllocs() - b.StopTimer() - source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) - version := uint64(source.LastCommitID().Version) - require.EqualValues(b, 1, version) - b.StartTimer() - - for i := 0; i < b.N; i++ { - target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) - for _, key := range source.StoreKeysByName() { - target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - } - err := target.LoadLatestVersion() - require.NoError(b, err) - require.EqualValues(b, 0, target.LastCommitID().Version) - - chunks := make(chan io.ReadCloser) - go func() { - writer := snapshots.NewStreamWriter(chunks) - require.NotNil(b, writer) - err := source.Snapshot(version, writer) - require.NoError(b, err) - }() - reader, err := snapshots.NewStreamReader(chunks) - require.NoError(b, err) - _, err = target.Restore(version, snapshottypes.CurrentFormat, reader) - require.NoError(b, err) - require.Equal(b, source.LastCommitID(), target.LastCommitID()) - } -} - -func BenchmarkMultistoreSnapshot100K(b *testing.B) { - benchmarkMultistoreSnapshot(b, 10, 10000) -} - -func BenchmarkMultistoreSnapshot1M(b *testing.B) { - benchmarkMultistoreSnapshot(b, 10, 100000) -} - -func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) { - benchmarkMultistoreSnapshotRestore(b, 10, 10000) -} - -func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) { 
- benchmarkMultistoreSnapshotRestore(b, 10, 100000) -} diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go deleted file mode 100644 index 7b2c05ff77..0000000000 --- a/store/rootmulti/store.go +++ /dev/null @@ -1,1230 +0,0 @@ -package rootmulti - -import ( - "errors" - "fmt" - "io" - "math" - "sort" - "strings" - "sync" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - protoio "github.com/cosmos/gogoproto/io" - gogotypes "github.com/cosmos/gogoproto/types" - iavltree "github.com/cosmos/iavl" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/cachemulti" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/listenkv" - "cosmossdk.io/store/mem" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/pruning" - pruningtypes "cosmossdk.io/store/pruning/types" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/transient" - "cosmossdk.io/store/types" -) - -const ( - latestVersionKey = "s/latest" - commitInfoKeyFmt = "s/%d" // s/ -) - -const iavlDisablefastNodeDefault = false - -// keysFromStoreKeyMap returns a slice of keys for the provided map lexically sorted by StoreKey.Name() -func keysFromStoreKeyMap[V any](m map[types.StoreKey]V) []types.StoreKey { - keys := make([]types.StoreKey, 0, len(m)) - for key := range m { - keys = append(keys, key) - } - sort.Slice(keys, func(i, j int) bool { - ki, kj := keys[i], keys[j] - return ki.Name() < kj.Name() - }) - return keys -} - -// Store is composed of many CommitStores. Name contrasts with -// cacheMultiStore which is used for branching other MultiStores. It implements -// the CommitMultiStore interface. -type Store struct { - db dbm.DB - logger log.Logger - lastCommitInfo *types.CommitInfo - pruningManager *pruning.Manager - iavlCacheSize int - iavlDisableFastNode bool - storesParams map[types.StoreKey]storeParams - stores map[types.StoreKey]types.CommitKVStore - keysByName map[string]types.StoreKey - initialVersion int64 - removalMap map[types.StoreKey]bool - traceWriter io.Writer - traceContext types.TraceContext - traceContextMutex sync.Mutex - interBlockCache types.MultiStorePersistentCache - listeners map[types.StoreKey]*types.MemoryListener - metrics metrics.StoreMetrics - commitHeader cmtproto.Header -} - -var ( - _ types.CommitMultiStore = (*Store)(nil) - _ types.Queryable = (*Store)(nil) -) - -// NewStore returns a reference to a new Store object with the provided DB. The -// store will be created with a PruneNothing pruning strategy by default. After -// a store is created, KVStores must be mounted and finally LoadLatestVersion or -// LoadVersion must be called. -func NewStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) *Store { - return &Store{ - db: db, - logger: logger, - iavlCacheSize: iavl.DefaultIAVLCacheSize, - iavlDisableFastNode: iavlDisablefastNodeDefault, - storesParams: make(map[types.StoreKey]storeParams), - stores: make(map[types.StoreKey]types.CommitKVStore), - keysByName: make(map[string]types.StoreKey), - listeners: make(map[types.StoreKey]*types.MemoryListener), - removalMap: make(map[types.StoreKey]bool), - pruningManager: pruning.NewManager(db, logger), - metrics: metricGatherer, - } -} - -// GetPruning fetches the pruning strategy from the root store. 
-func (rs *Store) GetPruning() pruningtypes.PruningOptions { - return rs.pruningManager.GetOptions() -} - -// SetPruning sets the pruning strategy on the root store and all the sub-stores. -// Note, calling SetPruning on the root store prior to LoadVersion or -// LoadLatestVersion performs a no-op as the stores aren't mounted yet. -func (rs *Store) SetPruning(pruningOpts pruningtypes.PruningOptions) { - rs.pruningManager.SetOptions(pruningOpts) -} - -// SetMetrics sets the metrics gatherer for the store package -func (rs *Store) SetMetrics(metrics metrics.StoreMetrics) { - rs.metrics = metrics -} - -// SetSnapshotInterval sets the interval at which the snapshots are taken. -// It is used by the store to determine which heights to retain until after the snapshot is complete. -func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) { - rs.pruningManager.SetSnapshotInterval(snapshotInterval) -} - -func (rs *Store) SetIAVLCacheSize(cacheSize int) { - rs.iavlCacheSize = cacheSize -} - -func (rs *Store) SetIAVLDisableFastNode(disableFastNode bool) { - rs.iavlDisableFastNode = disableFastNode -} - -// GetStoreType implements Store. -func (rs *Store) GetStoreType() types.StoreType { - return types.StoreTypeMulti -} - -// MountStoreWithDB implements CommitMultiStore. -func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db dbm.DB) { - if key == nil { - panic("MountIAVLStore() key cannot be nil") - } - if _, ok := rs.storesParams[key]; ok { - panic(fmt.Sprintf("store duplicate store key %v", key)) - } - if _, ok := rs.keysByName[key.Name()]; ok { - panic(fmt.Sprintf("store duplicate store key name %v", key)) - } - rs.storesParams[key] = newStoreParams(key, db, typ, 0) - rs.keysByName[key.Name()] = key -} - -// GetCommitStore returns a mounted CommitStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { - return rs.GetCommitKVStore(key) -} - -// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { - // If the Store has an inter-block cache, first attempt to lookup and unwrap - // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to - // the main mapping of CommitKVStores. - if rs.interBlockCache != nil { - if store := rs.interBlockCache.Unwrap(key); store != nil { - return store - } - } - - return rs.stores[key] -} - -// StoreKeysByName returns mapping storeNames -> StoreKeys -func (rs *Store) StoreKeysByName() map[string]types.StoreKey { - return rs.keysByName -} - -// LoadLatestVersionAndUpgrade implements CommitMultiStore -func (rs *Store) LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades) error { - ver := GetLatestVersion(rs.db) - return rs.loadVersion(ver, upgrades) -} - -// LoadVersionAndUpgrade allows us to rename substores while loading an older version -func (rs *Store) LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades) error { - return rs.loadVersion(ver, upgrades) -} - -// LoadLatestVersion implements CommitMultiStore. -func (rs *Store) LoadLatestVersion() error { - ver := GetLatestVersion(rs.db) - return rs.loadVersion(ver, nil) -} - -// LoadVersion implements CommitMultiStore. 
-func (rs *Store) LoadVersion(ver int64) error { - return rs.loadVersion(ver, nil) -} - -func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { - infos := make(map[string]types.StoreInfo) - - rs.logger.Debug("loadVersion", "ver", ver) - cInfo := &types.CommitInfo{} - - // load old data if we are not version 0 - if ver != 0 { - var err error - cInfo, err = rs.GetCommitInfo(ver) - if err != nil { - return err - } - - // convert StoreInfos slice to map - for _, storeInfo := range cInfo.StoreInfos { - infos[storeInfo.Name] = storeInfo - } - } - - // load each Store (note this doesn't panic on unmounted keys now) - newStores := make(map[types.StoreKey]types.CommitKVStore) - - storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) - - for key := range rs.storesParams { - storesKeys = append(storesKeys, key) - } - - if upgrades != nil { - // deterministic iteration order for upgrades - // (as the underlying store may change and - // upgrades make store changes where the execution order may matter) - sort.Slice(storesKeys, func(i, j int) bool { - return storesKeys[i].Name() < storesKeys[j].Name() - }) - } - - for _, key := range storesKeys { - storeParams := rs.storesParams[key] - commitID := rs.getCommitID(infos, key.Name()) - rs.logger.Debug("loadVersion commitID", "key", key, "ver", ver, "hash", fmt.Sprintf("%x", commitID.Hash)) - - // If it has been added, set the initial version - if upgrades.IsAdded(key.Name()) || upgrades.RenamedFrom(key.Name()) != "" { - storeParams.initialVersion = uint64(ver) + 1 - } else if commitID.Version != ver && storeParams.typ == types.StoreTypeIAVL { - return fmt.Errorf("version of store %s mismatch root store's version; expected %d got %d; new stores should be added using StoreUpgrades", key.Name(), ver, commitID.Version) - } - - store, err := rs.loadCommitStoreFromParams(key, commitID, storeParams) - if err != nil { - return errorsmod.Wrap(err, "failed to load store") - } - - newStores[key] = store - - // If it was deleted, remove all data - if upgrades.IsDeleted(key.Name()) { - if err := deleteKVStore(store.(types.KVStore)); err != nil { - return errorsmod.Wrapf(err, "failed to delete store %s", key.Name()) - } - rs.removalMap[key] = true - } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" { - // handle renames specially - // make an unregistered key to satisfy loadCommitStore params - oldKey := types.NewKVStoreKey(oldName) - oldParams := newStoreParams(oldKey, storeParams.db, storeParams.typ, 0) - - // load from the old name - oldStore, err := rs.loadCommitStoreFromParams(oldKey, rs.getCommitID(infos, oldName), oldParams) - if err != nil { - return errorsmod.Wrapf(err, "failed to load old store %s", oldName) - } - - // move all data - if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { - return errorsmod.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) - } - - // add the old key so its deletion is committed - newStores[oldKey] = oldStore - // this will ensure it's not perpetually stored in commitInfo - rs.removalMap[oldKey] = true - } - } - - rs.lastCommitInfo = cInfo - rs.stores = newStores - - // load any snapshot heights we missed from disk to be pruned on the next run - if err := rs.pruningManager.LoadSnapshotHeights(rs.db); err != nil { - return err - } - - return nil -} - -func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) types.CommitID { - info, ok := infos[name] - if !ok { - return types.CommitID{} - } - - return 
info.CommitId -} - -func deleteKVStore(kv types.KVStore) error { - // Note that we cannot write while iterating, so load all keys here, delete below - var keys [][]byte - itr := kv.Iterator(nil, nil) - for itr.Valid() { - keys = append(keys, itr.Key()) - itr.Next() - } - if err := itr.Close(); err != nil { - return err - } - - for _, k := range keys { - kv.Delete(k) - } - return nil -} - -// we simulate move by a copy and delete -func moveKVStoreData(oldDB, newDB types.KVStore) error { - // we read from one and write to another - itr := oldDB.Iterator(nil, nil) - for itr.Valid() { - newDB.Set(itr.Key(), itr.Value()) - itr.Next() - } - if err := itr.Close(); err != nil { - return err - } - - // then delete the old store - return deleteKVStore(oldDB) -} - -// PruneSnapshotHeight prunes the given height according to the prune strategy. -// If the strategy is PruneNothing, this is a no-op. -// For other strategies, this height is persisted until the snapshot is operated. -func (rs *Store) PruneSnapshotHeight(height int64) { - rs.pruningManager.HandleSnapshotHeight(height) -} - -// SetInterBlockCache sets the Store's internal inter-block (persistent) cache. -// When this is defined, all CommitKVStores will be wrapped with their respective -// inter-block cache. -func (rs *Store) SetInterBlockCache(c types.MultiStorePersistentCache) { - rs.interBlockCache = c -} - -// SetTracer sets the tracer for the MultiStore that the underlying -// stores will utilize to trace operations. A MultiStore is returned. -func (rs *Store) SetTracer(w io.Writer) types.MultiStore { - rs.traceWriter = w - return rs -} - -// SetTracingContext updates the tracing context for the MultiStore by merging -// the given context with the existing context by key. Any existing keys will -// be overwritten. It is implied that the caller should update the context when -// necessary between tracing operations. It returns a modified MultiStore. -func (rs *Store) SetTracingContext(tc types.TraceContext) types.MultiStore { - rs.traceContextMutex.Lock() - defer rs.traceContextMutex.Unlock() - rs.traceContext = rs.traceContext.Merge(tc) - - return rs -} - -func (rs *Store) getTracingContext() types.TraceContext { - rs.traceContextMutex.Lock() - defer rs.traceContextMutex.Unlock() - - if rs.traceContext == nil { - return nil - } - - ctx := types.TraceContext{} - for k, v := range rs.traceContext { - ctx[k] = v - } - - return ctx -} - -// TracingEnabled returns if tracing is enabled for the MultiStore. -func (rs *Store) TracingEnabled() bool { - return rs.traceWriter != nil -} - -// AddListeners adds a listener for the KVStore belonging to the provided StoreKey -func (rs *Store) AddListeners(keys []types.StoreKey) { - for i := range keys { - listener := rs.listeners[keys[i]] - if listener == nil { - rs.listeners[keys[i]] = types.NewMemoryListener() - } - } -} - -// ListeningEnabled returns if listening is enabled for a specific KVStore -func (rs *Store) ListeningEnabled(key types.StoreKey) bool { - if ls, ok := rs.listeners[key]; ok { - return ls != nil - } - return false -} - -// PopStateCache returns the accumulated state change messages from the CommitMultiStore -// Calling PopStateCache destroys only the currently accumulated state in each listener -// not the state in the store itself. This is a mutating and destructive operation. -// This method has been synchronized. 
-func (rs *Store) PopStateCache() []*types.StoreKVPair { - var cache []*types.StoreKVPair - for key := range rs.listeners { - ls := rs.listeners[key] - if ls != nil { - cache = append(cache, ls.PopStateCache()...) - } - } - sort.SliceStable(cache, func(i, j int) bool { - return cache[i].StoreKey < cache[j].StoreKey - }) - return cache -} - -// LatestVersion returns the latest version in the store -func (rs *Store) LatestVersion() int64 { - return rs.LastCommitID().Version -} - -// LastCommitID implements Committer/CommitStore. -func (rs *Store) LastCommitID() types.CommitID { - if rs.lastCommitInfo == nil { - return types.CommitID{ - Version: GetLatestVersion(rs.db), - } - } - - return rs.lastCommitInfo.CommitID() -} - -// Commit implements Committer/CommitStore. -func (rs *Store) Commit() types.CommitID { - var previousHeight, version int64 - if rs.lastCommitInfo.GetVersion() == 0 && rs.initialVersion > 1 { - // This case means that no commit has been made in the store, we - // start from initialVersion. - version = rs.initialVersion - } else { - // This case can means two things: - // - either there was already a previous commit in the store, in which - // case we increment the version from there, - // - or there was no previous commit, and initial version was not set, - // in which case we start at version 1. - previousHeight = rs.lastCommitInfo.GetVersion() - version = previousHeight + 1 - } - - if rs.commitHeader.Height != version { - rs.logger.Debug("commit header and version mismatch", "header_height", rs.commitHeader.Height, "version", version) - } - - rs.lastCommitInfo = commitStores(version, rs.stores, rs.removalMap) - rs.lastCommitInfo.Timestamp = rs.commitHeader.Time - defer rs.flushMetadata(rs.db, version, rs.lastCommitInfo) - - // remove remnants of removed stores - for sk := range rs.removalMap { - if _, ok := rs.stores[sk]; ok { - delete(rs.stores, sk) - delete(rs.storesParams, sk) - delete(rs.keysByName, sk.Name()) - } - } - - // reset the removalMap - rs.removalMap = make(map[types.StoreKey]bool) - - if err := rs.handlePruning(version); err != nil { - panic(err) - } - - return types.CommitID{ - Version: version, - Hash: rs.lastCommitInfo.Hash(), - } -} - -// WorkingHash returns the current hash of the store. -// it will be used to get the current app hash before commit. -func (rs *Store) WorkingHash() []byte { - storeInfos := make([]types.StoreInfo, 0, len(rs.stores)) - storeKeys := keysFromStoreKeyMap(rs.stores) - - for _, key := range storeKeys { - store := rs.stores[key] - - if store.GetStoreType() != types.StoreTypeIAVL { - continue - } - - if !rs.removalMap[key] { - si := types.StoreInfo{ - Name: key.Name(), - CommitId: types.CommitID{ - Hash: store.WorkingHash(), - }, - } - storeInfos = append(storeInfos, si) - } - } - - sort.SliceStable(storeInfos, func(i, j int) bool { - return storeInfos[i].Name < storeInfos[j].Name - }) - - return types.CommitInfo{StoreInfos: storeInfos}.Hash() -} - -// CacheWrap implements CacheWrapper/Store/CommitStore. -func (rs *Store) CacheWrap() types.CacheWrap { - return rs.CacheMultiStore().(types.CacheWrap) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return rs.CacheWrap() -} - -// CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore. -// It implements the MultiStore interface. 
-func (rs *Store) CacheMultiStore() types.CacheMultiStore { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range rs.stores { - store := types.KVStore(v) - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(k) { - store = listenkv.NewStore(store, k, rs.listeners[k]) - } - stores[k] = store - } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) -} - -// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it -// attempts to load stores at a given version (height). An error is returned if -// any store cannot be loaded. This should only be used for querying and -// iterating at past heights. -func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) { - cachedStores := make(map[types.StoreKey]types.CacheWrapper) - var commitInfo *types.CommitInfo - storeInfos := map[string]bool{} - for key, store := range rs.stores { - var cacheStore types.KVStore - switch store.GetStoreType() { - case types.StoreTypeIAVL: - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) - - // Attempt to lazy-load an already saved IAVL store version. If the - // version does not exist or is pruned, an error should be returned. - var err error - cacheStore, err = store.(*iavl.Store).GetImmutable(version) - // if we got error from loading a module store - // we fetch commit info of this version - // we use commit info to check if the store existed at this version or not - if err != nil { - if commitInfo == nil { - var errCommitInfo error - commitInfo, errCommitInfo = rs.GetCommitInfo(version) - - if errCommitInfo != nil { - return nil, errCommitInfo - } - - for _, storeInfo := range commitInfo.StoreInfos { - storeInfos[storeInfo.Name] = true - } - } - - // If the store existed at this version, it means there's actually an error - // getting the root store at this version. - if storeInfos[key.Name()] { - return nil, err - } - } - - default: - cacheStore = store - } - - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(key) { - cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) - } - - cachedStores[key] = cacheStore - } - - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil -} - -// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does -// not exist, it will panic. If the Store is wrapped in an inter-block cache, it -// will be unwrapped prior to being returned. -// -// TODO: This isn't used directly upstream. Consider returning the Store as-is -// instead of unwrapping. -func (rs *Store) GetStore(key types.StoreKey) types.Store { - store := rs.GetCommitKVStore(key) - if store == nil { - panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) - } - - return store -} - -// GetKVStore returns a mounted KVStore for a given StoreKey. If tracing is -// enabled on the KVStore, a wrapped TraceKVStore will be returned with the root -// store's tracer, otherwise, the original KVStore will be returned. -// -// NOTE: The returned KVStore may be wrapped in an inter-block cache if it is -// set on the root store. 
-func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { - s := rs.stores[key] - if s == nil { - panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) - } - store := types.KVStore(s) - - if rs.TracingEnabled() { - store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) - } - if rs.ListeningEnabled(key) { - store = listenkv.NewStore(store, key, rs.listeners[key]) - } - - return store -} - -func (rs *Store) handlePruning(version int64) error { - pruneHeight := rs.pruningManager.GetPruningHeight(version) - rs.logger.Debug("prune start", "height", version) - defer rs.logger.Debug("prune end", "height", version) - return rs.PruneStores(pruneHeight) -} - -// PruneStores prunes all history upto the specific height of the multi store. -func (rs *Store) PruneStores(pruningHeight int64) (err error) { - if pruningHeight <= 0 { - rs.logger.Debug("pruning skipped, height is less than or equal to 0") - return nil - } - - rs.logger.Debug("pruning store", "heights", pruningHeight) - - for key, store := range rs.stores { - rs.logger.Debug("pruning store", "key", key) // Also log store.name (a private variable)? - - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - if store.GetStoreType() != types.StoreTypeIAVL { - continue - } - - store = rs.GetCommitKVStore(key) - - err := store.(*iavl.Store).DeleteVersionsTo(pruningHeight) - if err == nil { - continue - } - - if errors.Is(err, iavltree.ErrVersionDoesNotExist) { - return err - } - - rs.logger.Error("failed to prune store", "key", key, "err", err) - } - return nil -} - -// getStoreByName performs a lookup of a StoreKey given a store name typically -// provided in a path. The StoreKey is then used to perform a lookup and return -// a Store. If the Store is wrapped in an inter-block cache, it will be unwrapped -// prior to being returned. If the StoreKey does not exist, nil is returned. -func (rs *Store) GetStoreByName(name string) types.Store { - key := rs.keysByName[name] - if key == nil { - return nil - } - - return rs.GetCommitKVStore(key) -} - -// Query calls substore.Query with the same `req` where `req.Path` is -// modified to remove the substore prefix. -// Ie. `req.Path` here is `//`, and trimmed to `/` for the substore. -// TODO: add proof for `multistore -> substore`. -func (rs *Store) Query(req *types.RequestQuery) (*types.ResponseQuery, error) { - path := req.Path - storeName, subpath, err := parsePath(path) - if err != nil { - return &types.ResponseQuery{}, err - } - - store := rs.GetStoreByName(storeName) - if store == nil { - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "no such store: %s", storeName) - } - - queryable, ok := store.(types.Queryable) - if !ok { - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store) - } - - // trim the path and make the query - req.Path = subpath - res, err := queryable.Query(req) - - if !req.Prove || !RequireProof(subpath) { - return res, err - } - - if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 { - return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned") - } - - // If the request's height is the latest height we've committed, then utilize - // the store's lastCommitInfo as this commit info may not be flushed to disk. - // Otherwise, we query for the commit info from disk. 
- var commitInfo *types.CommitInfo - - if res.Height == rs.lastCommitInfo.Version { - commitInfo = rs.lastCommitInfo - } else { - commitInfo, err = rs.GetCommitInfo(res.Height) - if err != nil { - return &types.ResponseQuery{}, err - } - } - - // Restore origin path and append proof op. - res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName)) - - return res, nil -} - -// SetInitialVersion sets the initial version of the IAVL tree. It is used when -// starting a new chain at an arbitrary height. -func (rs *Store) SetInitialVersion(version int64) error { - rs.initialVersion = version - - // Loop through all the stores, if it's an IAVL store, then set initial - // version on it. - for key, store := range rs.stores { - if store.GetStoreType() == types.StoreTypeIAVL { - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) - store.(types.StoreWithInitialVersion).SetInitialVersion(version) - } - } - - return nil -} - -// parsePath expects a format like /[/] -// Must start with /, subpath may be empty -// Returns error if it doesn't start with / -func parsePath(path string) (storeName, subpath string, err error) { - if !strings.HasPrefix(path, "/") { - return storeName, subpath, errorsmod.Wrapf(types.ErrUnknownRequest, "invalid path: %s", path) - } - - paths := strings.SplitN(path[1:], "/", 2) - storeName = paths[0] - - if len(paths) == 2 { - subpath = "/" + paths[1] - } - - return storeName, subpath, nil -} - -//---------------------- Snapshotting ------------------ - -// Snapshot implements snapshottypes.Snapshotter. The snapshot output for a given format must be -// identical across nodes such that chunks from different sources fit together. If the output for a -// given format changes (at the byte level), the snapshot format must be bumped - see -// TestMultistoreSnapshot_Checksum test. -func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error { - if height == 0 { - return errorsmod.Wrap(types.ErrLogic, "cannot snapshot height 0") - } - if height > uint64(GetLatestVersion(rs.db)) { - return errorsmod.Wrapf(types.ErrLogic, "cannot snapshot future height %v", height) - } - - // Collect stores to snapshot (only IAVL stores are supported) - type namedStore struct { - *iavl.Store - name string - } - stores := []namedStore{} - keys := keysFromStoreKeyMap(rs.stores) - for _, key := range keys { - switch store := rs.GetCommitKVStore(key).(type) { - case *iavl.Store: - stores = append(stores, namedStore{name: key.Name(), Store: store}) - case *transient.Store, *mem.Store: - // Non-persisted stores shouldn't be snapshotted - continue - default: - return errorsmod.Wrapf(types.ErrLogic, - "don't know how to snapshot store %q of type %T", key.Name(), store) - } - } - sort.Slice(stores, func(i, j int) bool { - return strings.Compare(stores[i].name, stores[j].name) == -1 - }) - - // Export each IAVL store. Stores are serialized as a stream of SnapshotItem Protobuf - // messages. The first item contains a SnapshotStore with store metadata (i.e. name), - // and the following messages contain a SnapshotNode (i.e. an ExportNode). Store changes - // are demarcated by new SnapshotStore items. 
- for _, store := range stores { - rs.logger.Debug("starting snapshot", "store", store.name, "height", height) - exporter, err := store.Export(int64(height)) - if err != nil { - rs.logger.Error("snapshot failed; exporter error", "store", store.name, "err", err) - return err - } - - err = func() error { - defer exporter.Close() - - err := protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_Store{ - Store: &snapshottypes.SnapshotStoreItem{ - Name: store.name, - }, - }, - }) - if err != nil { - rs.logger.Error("snapshot failed; item store write failed", "store", store.name, "err", err) - return err - } - - nodeCount := 0 - for { - node, err := exporter.Next() - if err == iavltree.ErrorExportDone { - rs.logger.Debug("snapshot Done", "store", store.name, "nodeCount", nodeCount) - break - } else if err != nil { - return err - } - err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_IAVL{ - IAVL: &snapshottypes.SnapshotIAVLItem{ - Key: node.Key, - Value: node.Value, - Height: int32(node.Height), - Version: node.Version, - }, - }, - }) - if err != nil { - return err - } - nodeCount++ - } - - return nil - }() - - if err != nil { - return err - } - } - - return nil -} - -// Restore implements snapshottypes.Snapshotter. -// returns next snapshot item and error. -func (rs *Store) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshottypes.SnapshotItem, error) { - // Import nodes into stores. The first item is expected to be a SnapshotItem containing - // a SnapshotStoreItem, telling us which store to import into. The following items will contain - // SnapshotNodeItem (i.e. ExportNode) until we reach the next SnapshotStoreItem or EOF. - var importer *iavltree.Importer - var snapshotItem snapshottypes.SnapshotItem -loop: - for { - snapshotItem = snapshottypes.SnapshotItem{} - err := protoReader.ReadMsg(&snapshotItem) - if err == io.EOF { - break - } else if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") - } - - switch item := snapshotItem.Item.(type) { - case *snapshottypes.SnapshotItem_Store: - if importer != nil { - err = importer.Commit() - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed") - } - importer.Close() - } - store, ok := rs.GetStoreByName(item.Store.Name).(*iavl.Store) - if !ok || store == nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "cannot import into non-IAVL store %q", item.Store.Name) - } - importer, err = store.Import(int64(height)) - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "import failed") - } - defer importer.Close() - // Importer height must reflect the node height (which usually matches the block height, but not always) - rs.logger.Debug("restoring snapshot", "store", item.Store.Name) - - case *snapshottypes.SnapshotItem_IAVL: - if importer == nil { - rs.logger.Error("failed to restore; received IAVL node item before store item") - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(types.ErrLogic, "received IAVL node item before store item") - } - if item.IAVL.Height > math.MaxInt8 { - return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "node height %v cannot exceed %v", - item.IAVL.Height, math.MaxInt8) - } - node := &iavltree.ExportNode{ - Key: item.IAVL.Key, - Value: item.IAVL.Value, - Height: int8(item.IAVL.Height), - Version: item.IAVL.Version, - } - // Protobuf does not differentiate between 
[]byte{} as nil, but fortunately IAVL does - // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. - if node.Key == nil { - node.Key = []byte{} - } - if node.Height == 0 && node.Value == nil { - node.Value = []byte{} - } - err := importer.Add(node) - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL node import failed") - } - - default: - break loop - } - } - - if importer != nil { - err := importer.Commit() - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed") - } - importer.Close() - } - - rs.flushMetadata(rs.db, int64(height), rs.buildCommitInfo(int64(height))) - return snapshotItem, rs.LoadLatestVersion() -} - -func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) { - var db dbm.DB - - if params.db != nil { - db = dbm.NewPrefixDB(params.db, []byte("s/_/")) - } else { - prefix := "s/k:" + params.key.Name() + "/" - db = dbm.NewPrefixDB(rs.db, []byte(prefix)) - } - - switch params.typ { - case types.StoreTypeMulti: - panic("recursive MultiStores not yet supported") - - case types.StoreTypeIAVL: - var store types.CommitKVStore - var err error - - if params.initialVersion == 0 { - store, err = iavl.LoadStore(db, rs.logger, key, id, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics) - } else { - store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics) - } - - if err != nil { - return nil, err - } - - if rs.interBlockCache != nil { - // Wrap and get a CommitKVStore with inter-block caching. Note, this should - // only wrap the primary CommitKVStore, not any store that is already - // branched as that will create unexpected behavior. - store = rs.interBlockCache.GetStoreCache(key, store) - } - - return store, err - - case types.StoreTypeDB: - return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}}, nil - - case types.StoreTypeTransient: - _, ok := key.(*types.TransientStoreKey) - if !ok { - return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String()) - } - - return transient.NewStore(), nil - - case types.StoreTypeMemory: - if _, ok := key.(*types.MemoryStoreKey); !ok { - return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String()) - } - - return mem.NewStore(), nil - - default: - panic(fmt.Sprintf("unrecognized store type %v", params.typ)) - } -} - -func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo { - keys := keysFromStoreKeyMap(rs.stores) - storeInfos := []types.StoreInfo{} - for _, key := range keys { - store := rs.stores[key] - storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { - continue - } - storeInfos = append(storeInfos, types.StoreInfo{ - Name: key.Name(), - CommitId: store.LastCommitID(), - }) - } - return &types.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } -} - -// RollbackToVersion delete the versions after `target` and update the latest version. -func (rs *Store) RollbackToVersion(target int64) error { - if target <= 0 { - return fmt.Errorf("invalid rollback height target: %d", target) - } - - for key, store := range rs.stores { - if store.GetStoreType() == types.StoreTypeIAVL { - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. 
- store = rs.GetCommitKVStore(key) - err := store.(*iavl.Store).LoadVersionForOverwriting(target) - if err != nil { - return err - } - } - } - - rs.flushMetadata(rs.db, target, rs.buildCommitInfo(target)) - - return rs.LoadLatestVersion() -} - -// SetCommitHeader sets the commit block header of the store. -func (rs *Store) SetCommitHeader(h cmtproto.Header) { - rs.commitHeader = h -} - -// GetCommitInfo attempts to retrieve CommitInfo for a given version/height. It -// will return an error if no CommitInfo exists, we fail to unmarshal the record -// or if we cannot retrieve the object from the DB. -func (rs *Store) GetCommitInfo(ver int64) (*types.CommitInfo, error) { - cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver) - - bz, err := rs.db.Get([]byte(cInfoKey)) - if err != nil { - return nil, errorsmod.Wrap(err, "failed to get commit info") - } else if bz == nil { - return nil, errors.New("no commit info found") - } - - cInfo := &types.CommitInfo{} - if err = cInfo.Unmarshal(bz); err != nil { - return nil, errorsmod.Wrap(err, "failed unmarshal commit info") - } - - return cInfo, nil -} - -func (rs *Store) flushMetadata(db dbm.DB, version int64, cInfo *types.CommitInfo) { - rs.logger.Debug("flushing metadata", "height", version) - batch := db.NewBatch() - defer func() { - _ = batch.Close() - }() - - if cInfo != nil { - flushCommitInfo(batch, version, cInfo) - } else { - rs.logger.Debug("commitInfo is nil, not flushed", "height", version) - } - - flushLatestVersion(batch, version) - - if err := batch.WriteSync(); err != nil { - panic(fmt.Errorf("error on batch write %w", err)) - } - rs.logger.Debug("flushing metadata finished", "height", version) -} - -type storeParams struct { - key types.StoreKey - db dbm.DB - typ types.StoreType - initialVersion uint64 -} - -func newStoreParams(key types.StoreKey, db dbm.DB, typ types.StoreType, initialVersion uint64) storeParams { - return storeParams{ - key: key, - db: db, - typ: typ, - initialVersion: initialVersion, - } -} - -func GetLatestVersion(db dbm.DB) int64 { - bz, err := db.Get([]byte(latestVersionKey)) - if err != nil { - panic(err) - } else if bz == nil { - return 0 - } - - var latestVersion int64 - - if err := gogotypes.StdInt64Unmarshal(&latestVersion, bz); err != nil { - panic(err) - } - - return latestVersion -} - -// Commits each store and returns a new commitInfo. -func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { - storeInfos := make([]types.StoreInfo, 0, len(storeMap)) - storeKeys := keysFromStoreKeyMap(storeMap) - - for _, key := range storeKeys { - store := storeMap[key] - last := store.LastCommitID() - - // If a commit event execution is interrupted, a new iavl store's version - // will be larger than the RMS's metadata, when the block is replayed, we - // should avoid committing that iavl store again. 
- var commitID types.CommitID - if last.Version >= version { - last.Version = version - commitID = last - } else { - commitID = store.Commit() - } - - storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { - continue - } - - if !removalMap[key] { - si := types.StoreInfo{} - si.Name = key.Name() - si.CommitId = commitID - storeInfos = append(storeInfos, si) - } - } - - sort.SliceStable(storeInfos, func(i, j int) bool { - return strings.Compare(storeInfos[i].Name, storeInfos[j].Name) < 0 - }) - - return &types.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } -} - -func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) { - bz, err := cInfo.Marshal() - if err != nil { - panic(err) - } - - cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version) - err = batch.Set([]byte(cInfoKey), bz) - if err != nil { - panic(err) - } -} - -func flushLatestVersion(batch dbm.Batch, version int64) { - bz, err := gogotypes.StdInt64Marshal(version) - if err != nil { - panic(err) - } - - err = batch.Set([]byte(latestVersionKey), bz) - if err != nil { - panic(err) - } -} diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go deleted file mode 100644 index df69e2ab73..0000000000 --- a/store/rootmulti/store_test.go +++ /dev/null @@ -1,983 +0,0 @@ -package rootmulti - -import ( - "bytes" - "fmt" - "testing" - "time" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/cachemulti" - "cosmossdk.io/store/iavl" - sdkmaps "cosmossdk.io/store/internal/maps" - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -func TestStoreType(t *testing.T) { - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, db) -} - -func TestGetCommitKVStore(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - key := ms.keysByName["store1"] - - store1 := ms.GetCommitKVStore(key) - require.NotNil(t, store1) - require.IsType(t, &iavl.Store{}, store1) - - store2 := ms.GetCommitStore(key) - require.NotNil(t, store2) - require.IsType(t, &iavl.Store{}, store2) -} - -func TestStoreMount(t *testing.T) { - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - - key1 := types.NewKVStoreKey("store1") - key2 := types.NewKVStoreKey("store2") - dup1 := types.NewKVStoreKey("store1") - - require.NotPanics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) - require.NotPanics(t, func() { store.MountStoreWithDB(key2, types.StoreTypeIAVL, db) }) - - require.Panics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) - require.Panics(t, func() { store.MountStoreWithDB(nil, types.StoreTypeIAVL, db) }) - require.Panics(t, func() { store.MountStoreWithDB(dup1, types.StoreTypeIAVL, db) }) -} - -func TestCacheMultiStore(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - cacheMulti := ms.CacheMultiStore() - require.IsType(t, cachemulti.Store{}, cacheMulti) -} - -func TestCacheMultiStoreWithVersion(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, 
pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - commitID := types.CommitID{} - checkStore(t, ms, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - - store1 := ms.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - cID := ms.Commit() - require.Equal(t, int64(1), cID.Version) - - // require no failure when given an invalid or pruned version - _, err = ms.CacheMultiStoreWithVersion(cID.Version + 1) - require.Error(t, err) - - // require a valid version can be cache-loaded - cms, err := ms.CacheMultiStoreWithVersion(cID.Version) - require.NoError(t, err) - - // require a valid key lookup yields the correct value - kvStore := cms.GetKVStore(ms.keysByName["store1"]) - require.NotNil(t, kvStore) - require.Equal(t, kvStore.Get(k), v) - - // add new module stores (store4 and store5) to multi stores and commit - ms.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) - ms.MountStoreWithDB(types.NewKVStoreKey("store5"), types.StoreTypeIAVL, nil) - err = ms.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{Added: []string{"store4", "store5"}}) - require.NoError(t, err) - ms.Commit() - - // cache multistore of version before adding store4 should works - _, err = ms.CacheMultiStoreWithVersion(1) - require.NoError(t, err) - - // require we cannot commit (write) to a cache-versioned multi-store - require.Panics(t, func() { - kvStore.Set(k, []byte("newValue")) - cms.Write() - }) -} - -func TestHashStableWithEmptyCommit(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - commitID := types.CommitID{} - checkStore(t, ms, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - - store1 := ms.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - workingHash := ms.WorkingHash() - cID := ms.Commit() - require.Equal(t, int64(1), cID.Version) - hash := cID.Hash - require.Equal(t, workingHash, hash) - - // make an empty commit, it should update version, but not affect hash - workingHash = ms.WorkingHash() - cID = ms.Commit() - require.Equal(t, workingHash, cID.Hash) - require.Equal(t, int64(2), cID.Version) - require.Equal(t, hash, cID.Hash) -} - -func TestMultistoreCommitLoad(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - // New store has empty last commit. - commitID := types.CommitID{} - checkStore(t, store, commitID, commitID) - - // Make sure we can get stores by name. - s1 := store.GetStoreByName("store1") - require.NotNil(t, s1) - s3 := store.GetStoreByName("store3") - require.NotNil(t, s3) - s77 := store.GetStoreByName("store77") - require.Nil(t, s77) - - // Make a few commits and check them. - nCommits := int64(3) - for i := int64(0); i < nCommits; i++ { - workingHash := store.WorkingHash() - commitID = store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, i+1) - checkStore(t, store, expectedCommitID, commitID) - } - - // Load the latest multistore again and check version. 
- store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = store.LoadLatestVersion() - require.Nil(t, err) - commitID = getExpectedCommitID(store, nCommits) - checkStore(t, store, commitID, commitID) - - // Commit and check version. - workingHash := store.WorkingHash() - commitID = store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, nCommits+1) - checkStore(t, store, expectedCommitID, commitID) - - // Load an older multistore and check version. - ver := nCommits - 1 - store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = store.LoadVersion(ver) - require.Nil(t, err) - commitID = getExpectedCommitID(store, ver) - checkStore(t, store, commitID, commitID) -} - -func TestMultistoreLoadWithUpgrade(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - // write some data in all stores - k1, v1 := []byte("first"), []byte("store") - s1, _ := store.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, s1) - s1.Set(k1, v1) - - k2, v2 := []byte("second"), []byte("restore") - s2, _ := store.GetStoreByName("store2").(types.KVStore) - require.NotNil(t, s2) - s2.Set(k2, v2) - - k3, v3 := []byte("third"), []byte("dropped") - s3, _ := store.GetStoreByName("store3").(types.KVStore) - require.NotNil(t, s3) - s3.Set(k3, v3) - - s4, _ := store.GetStoreByName("store4").(types.KVStore) - require.Nil(t, s4) - - // do one commit - workingHash := store.WorkingHash() - commitID := store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, 1) - checkStore(t, store, expectedCommitID, commitID) - - ci, err := store.GetCommitInfo(1) - require.NoError(t, err) - require.Equal(t, int64(1), ci.Version) - require.Equal(t, 3, len(ci.StoreInfos)) - checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"}) - - // Load without changes and make sure it is sensible - store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - err = store.LoadLatestVersion() - require.Nil(t, err) - commitID = getExpectedCommitID(store, 1) - checkStore(t, store, commitID, commitID) - - // let's query data to see it was saved properly - s2, _ = store.GetStoreByName("store2").(types.KVStore) - require.NotNil(t, s2) - require.Equal(t, v2, s2.Get(k2)) - - // now, let's load with upgrades... 
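(Aside, for readability of the assertions below: the upgrade spec returned by newMultiStoreWithModifiedMounts, defined near the end of this file, amounts to the following literal.)

upgrades := &types.StoreUpgrades{
	Added:   []string{"store4"},
	Renamed: []types.StoreRename{{OldKey: "store2", NewKey: "restore2"}},
	Deleted: []string{"store3"},
}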
- restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = restore.LoadLatestVersionAndUpgrade(upgrades) - require.Nil(t, err) - - // s1 was not changed - s1, _ = restore.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, s1) - require.Equal(t, v1, s1.Get(k1)) - - // store3 is mounted, but data deleted are gone - s3, _ = restore.GetStoreByName("store3").(types.KVStore) - require.NotNil(t, s3) - require.Nil(t, s3.Get(k3)) // data was deleted - - // store4 is mounted, with empty data - s4, _ = restore.GetStoreByName("store4").(types.KVStore) - require.NotNil(t, s4) - - iterator := s4.Iterator(nil, nil) - - values := 0 - for ; iterator.Valid(); iterator.Next() { - values++ - } - require.Zero(t, values) - - require.NoError(t, iterator.Close()) - - // write something inside store4 - k4, v4 := []byte("fourth"), []byte("created") - s4.Set(k4, v4) - - // store2 is no longer mounted - st2 := restore.GetStoreByName("store2") - require.Nil(t, st2) - - // restore2 has the old data - rs2, _ := restore.GetStoreByName("restore2").(types.KVStore) - require.NotNil(t, rs2) - require.Equal(t, v2, rs2.Get(k2)) - - // store this migrated data, and load it again without migrations - migratedID := restore.Commit() - require.Equal(t, migratedID.Version, int64(2)) - - reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - // unmount store3 since store3 was deleted - unmountStore(reload, "store3") - - rs3, _ := reload.GetStoreByName("store3").(types.KVStore) - require.Nil(t, rs3) - - err = reload.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, migratedID, reload.LastCommitID()) - - // query this new store - rl1, _ := reload.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, rl1) - require.Equal(t, v1, rl1.Get(k1)) - - rl2, _ := reload.GetStoreByName("restore2").(types.KVStore) - require.NotNil(t, rl2) - require.Equal(t, v2, rl2.Get(k2)) - - rl4, _ := reload.GetStoreByName("store4").(types.KVStore) - require.NotNil(t, rl4) - require.Equal(t, v4, rl4.Get(k4)) - - // check commitInfo in storage - ci, err = reload.GetCommitInfo(2) - require.NoError(t, err) - require.Equal(t, int64(2), ci.Version) - require.Equal(t, 3, len(ci.StoreInfos), ci.StoreInfos) - checkContains(t, ci.StoreInfos, []string{"store1", "restore2", "store4"}) -} - -func TestParsePath(t *testing.T) { - _, _, err := parsePath("foo") - require.Error(t, err) - - store, subpath, err := parsePath("/foo") - require.NoError(t, err) - require.Equal(t, store, "foo") - require.Equal(t, subpath, "") - - store, subpath, err = parsePath("/fizz/bang/baz") - require.NoError(t, err) - require.Equal(t, store, "fizz") - require.Equal(t, subpath, "/bang/baz") - - substore, subsubpath, err := parsePath(subpath) - require.NoError(t, err) - require.Equal(t, substore, "bang") - require.Equal(t, subsubpath, "/baz") -} - -func TestMultiStoreRestart(t *testing.T) { - db := dbm.NewMemDB() - pruning := pruningtypes.NewCustomPruningOptions(2, 1) - multi := newMultiStoreWithMounts(db, pruning) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - initCid := multi.LastCommitID() - - k, v := "wind", "blows" - k2, v2 := "water", "flows" - k3, v3 := "fire", "burns" - - for i := 1; i < 3; i++ { - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, i))) - - // ... and another. 
- store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, i))) - - // ... and another. - store3 := multi.GetStoreByName("store3").(types.KVStore) - store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, i))) - - multi.Commit() - - cinfo, err := multi.GetCommitInfo(int64(i)) - require.NoError(t, err) - require.Equal(t, int64(i), cinfo.Version) - } - - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, 3))) - - // ... and another. - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, 3))) - - multi.Commit() - - flushedCinfo, err := multi.GetCommitInfo(3) - require.Nil(t, err) - require.NotEqual(t, initCid, flushedCinfo, "CID is different after flush to disk") - - // ... and another. - store3 := multi.GetStoreByName("store3").(types.KVStore) - store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, 3))) - - multi.Commit() - - postFlushCinfo, err := multi.GetCommitInfo(4) - require.NoError(t, err) - require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit") - - multi = newMultiStoreWithMounts(db, pruning) - err = multi.LoadLatestVersion() - require.Nil(t, err) - - reloadedCid := multi.LastCommitID() - require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID") - - // Check that store1 and store2 retained date from 3rd commit - store1 = multi.GetStoreByName("store1").(types.KVStore) - val := store1.Get([]byte(k)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v, 3)), val, "Reloaded value not the same as last flushed value") - - store2 = multi.GetStoreByName("store2").(types.KVStore) - val2 := store2.Get([]byte(k2)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v2, 3)), val2, "Reloaded value not the same as last flushed value") - - // Check that store3 still has data from last commit even though update happened on 2nd commit - store3 = multi.GetStoreByName("store3").(types.KVStore) - val3 := store3.Get([]byte(k3)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value") -} - -func TestMultiStoreQuery(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - k, v := []byte("wind"), []byte("blows") - k2, v2 := []byte("water"), []byte("flows") - // v3 := []byte("is cold") - - // Commit the multistore. - _ = multi.Commit() - - // Make sure we can get by name. - garbage := multi.GetStoreByName("bad-name") - require.Nil(t, garbage) - - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - // ... and another. - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set(k2, v2) - - // Commit the multistore. - cid := multi.Commit() - ver := cid.Version - - // Reload multistore from database - multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = multi.LoadLatestVersion() - require.Nil(t, err) - - // Test bad path. 
- query := types.RequestQuery{Path: "/key", Data: k, Height: ver} - _, err = multi.Query(&query) - codespace, code, _ := errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - query.Path = "h897fy32890rf63296r92" - _, err = multi.Query(&query) - codespace, code, _ = errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - // Test invalid store name. - query.Path = "/garbage/key" - _, err = multi.Query(&query) - codespace, code, _ = errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - // Test valid query with data. - query.Path = "/store1/key" - qres, err := multi.Query(&query) - require.NoError(t, err) - require.Equal(t, v, qres.Value) - - // Test valid but empty query. - query.Path = "/store2/key" - query.Prove = true - qres, err = multi.Query(&query) - require.NoError(t, err) - require.Nil(t, qres.Value) - - // Test store2 data. - // Since we are using the request as a reference, the path will be modified. - query.Data = k2 - query.Path = "/store2/key" - qres, err = multi.Query(&query) - require.NoError(t, err) - require.Equal(t, v2, qres.Value) -} - -func TestMultiStore_Pruning(t *testing.T) { - testCases := []struct { - name string - numVersions int64 - po pruningtypes.PruningOptions - deleted []int64 - saved []int64 - }{ - {"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"prune everything", 12, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7}, []int64{8, 9, 10, 11, 12}}, - {"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 3, 4, 6, 5, 7}, []int64{8, 9, 10}}, - {"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 3, 4, 5, 6}, []int64{7, 8, 9, 10}}, - {"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - db := dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, tc.po) - require.NoError(t, ms.LoadLatestVersion()) - - for i := int64(0); i < tc.numVersions; i++ { - ms.Commit() - } - - for _, v := range tc.saved { - _, err := ms.CacheMultiStoreWithVersion(v) - require.NoError(t, err, "expected no error when loading height: %d", v) - } - - for _, v := range tc.deleted { - _, err := ms.CacheMultiStoreWithVersion(v) - require.Error(t, err, "expected error when loading height: %d", v) - } - }) - } -} - -func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) { - const ( - numVersions int64 = 10 - keepRecent uint64 = 2 - interval uint64 = 10 - ) - - db := dbm.NewMemDB() - - ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval)) - require.NoError(t, ms.LoadLatestVersion()) - - var lastCommitInfo types.CommitID - for i := int64(0); i < numVersions; i++ { - lastCommitInfo = ms.Commit() - } - - require.Equal(t, numVersions, lastCommitInfo.Version) - - for v := int64(1); v < numVersions-int64(keepRecent); v++ { - err := ms.LoadVersion(v) - require.Error(t, err, "expected error when loading pruned height: %d", v) - } - - for v := (numVersions - 
int64(keepRecent)); v < numVersions; v++ { - err := ms.LoadVersion(v) - require.NoError(t, err, "expected no error when loading height: %d", v) - } - - // Get latest - err := ms.LoadVersion(numVersions - 1) - require.NoError(t, err) - - // Ensure already pruned snapshot heights were loaded - require.NoError(t, ms.pruningManager.LoadSnapshotHeights(db)) - - // Test pruning the same heights again - lastCommitInfo = ms.Commit() - require.Equal(t, numVersions, lastCommitInfo.Version) - - // Ensure that can commit one more height with no panic - lastCommitInfo = ms.Commit() - require.Equal(t, numVersions+1, lastCommitInfo.Version) -} - -func TestMultiStore_PruningRestart(t *testing.T) { - db := dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) - require.NoError(t, ms.LoadLatestVersion()) - - // Commit enough to build up heights to prune, where on the next block we should - // batch delete. - for i := int64(0); i < 10; i++ { - ms.Commit() - } - - actualHeightToPrune := ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(0), actualHeightToPrune) - - // "restart" - ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) - err := ms.LoadLatestVersion() - require.NoError(t, err) - - actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(0), actualHeightToPrune) - - // commit one more block and ensure the heights have been pruned - ms.Commit() - - actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(8), actualHeightToPrune) - - for v := int64(1); v <= actualHeightToPrune; v++ { - _, err := ms.CacheMultiStoreWithVersion(v) - require.Error(t, err, "expected error when loading height: %d", v) - } -} - -// TestUnevenStoresHeightCheck tests if loading root store correctly errors when -// there's any module store with the wrong height -func TestUnevenStoresHeightCheck(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - // commit to increment store's height - store.Commit() - - // mount store4 to root store - store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) - - // load the stores without upgrades - err = store.LoadLatestVersion() - require.Error(t, err) - - // now, let's load with upgrades... 
- upgrades := &types.StoreUpgrades{ - Added: []string{"store4"}, - } - err = store.LoadLatestVersionAndUpgrade(upgrades) - require.Nil(t, err) -} - -func TestSetInitialVersion(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - require.NoError(t, multi.LoadLatestVersion()) - - err := multi.SetInitialVersion(5) - require.NoError(t, err) - require.Equal(t, int64(5), multi.initialVersion) - - multi.Commit() - require.Equal(t, int64(5), multi.LastCommitID().Version) - - ckvs := multi.GetCommitKVStore(multi.keysByName["store1"]) - iavlStore, ok := ckvs.(*iavl.Store) - require.True(t, ok) - require.True(t, iavlStore.VersionExists(5)) -} - -func TestAddListenersAndListeningEnabled(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - testKey := types.NewKVStoreKey("listening_test_key") - enabled := multi.ListeningEnabled(testKey) - require.False(t, enabled) - - wrongTestKey := types.NewKVStoreKey("wrong_listening_test_key") - multi.AddListeners([]types.StoreKey{testKey}) - enabled = multi.ListeningEnabled(wrongTestKey) - require.False(t, enabled) - - enabled = multi.ListeningEnabled(testKey) - require.True(t, enabled) -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - cacheWrapper := multi.CacheWrap() - require.IsType(t, cachemulti.Store{}, cacheWrapper) - - cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) - require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) -} - -func TestTraceConcurrency(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.NoError(t, err) - - b := &bytes.Buffer{} - key := multi.keysByName["store1"] - tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) - - multi.SetTracer(b) - multi.SetTracingContext(tc) - - cms := multi.CacheMultiStore() - store1 := cms.GetKVStore(key) - cw := store1.CacheWrapWithTrace(b, tc) - _ = cw - require.NotNil(t, store1) - - stop := make(chan struct{}) - stopW := make(chan struct{}) - - go func(stop chan struct{}) { - for { - select { - case <-stop: - return - default: - store1.Set([]byte{1}, []byte{1}) - cms.Write() - } - } - }(stop) - - go func(stop chan struct{}) { - for { - select { - case <-stop: - return - default: - multi.SetTracingContext(tc) - } - } - }(stopW) - - time.Sleep(3 * time.Second) - stop <- struct{}{} - stopW <- struct{}{} -} - -func TestCommitOrdered(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - commitID := types.CommitID{} - checkStore(t, multi, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - k2, v2 := []byte("water"), []byte("flows") - k3, v3 := []byte("fire"), []byte("burns") - - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set(k2, v2) - - store3 := multi.GetStoreByName("store3").(types.KVStore) - store3.Set(k3, v3) - - typeID := multi.Commit() - require.Equal(t, int64(1), typeID.Version) - - ci, err := multi.GetCommitInfo(1) - require.NoError(t, err) - require.Equal(t, int64(1), ci.Version) - 
require.Equal(t, 3, len(ci.StoreInfos)) - for i, s := range ci.StoreInfos { - require.Equal(t, s.Name, fmt.Sprintf("store%d", i+1)) - } -} - -//----------------------------------------------------------------------- -// utils - -var ( - testStoreKey1 = types.NewKVStoreKey("store1") - testStoreKey2 = types.NewKVStoreKey("store2") - testStoreKey3 = types.NewKVStoreKey("store3") -) - -func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store { - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.SetPruning(pruningOpts) - - store.MountStoreWithDB(testStoreKey1, types.StoreTypeIAVL, nil) - store.MountStoreWithDB(testStoreKey2, types.StoreTypeIAVL, nil) - store.MountStoreWithDB(testStoreKey3, types.StoreTypeIAVL, nil) - - return store -} - -func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) { - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.SetPruning(pruningOpts) - - store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("restore2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("store3"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) - - upgrades := &types.StoreUpgrades{ - Added: []string{"store4"}, - Renamed: []types.StoreRename{{ - OldKey: "store2", - NewKey: "restore2", - }}, - Deleted: []string{"store3"}, - } - - return store, upgrades -} - -func unmountStore(rootStore *Store, storeKeyName string) { - sk := rootStore.keysByName[storeKeyName] - delete(rootStore.stores, sk) - delete(rootStore.storesParams, sk) - delete(rootStore.keysByName, storeKeyName) -} - -func checkStore(t *testing.T, store *Store, expect, got types.CommitID) { - t.Helper() - require.Equal(t, expect, got) - require.Equal(t, expect, store.LastCommitID()) -} - -func checkContains(tb testing.TB, info []types.StoreInfo, wanted []string) { - tb.Helper() - - for _, want := range wanted { - checkHas(tb, info, want) - } -} - -func checkHas(tb testing.TB, info []types.StoreInfo, want string) { - tb.Helper() - for _, i := range info { - if i.Name == want { - return - } - } - tb.Fatalf("storeInfo doesn't contain %s", want) -} - -func getExpectedCommitID(store *Store, ver int64) types.CommitID { - return types.CommitID{ - Version: ver, - Hash: hashStores(store.stores), - } -} - -func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte { - m := make(map[string][]byte, len(stores)) - for key, store := range stores { - name := key.Name() - m[name] = types.StoreInfo{ - Name: name, - CommitId: store.LastCommitID(), - }.GetHash() - } - return sdkmaps.HashFromMap(m) -} - -type MockListener struct { - stateCache []types.StoreKVPair -} - -func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error { - tl.stateCache = append(tl.stateCache, types.StoreKVPair{ - StoreKey: storeKey.Name(), - Key: key, - Value: value, - Delete: delete, - }) - return nil -} - -func TestStateListeners(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - require.Empty(t, ms.listeners) - - ms.AddListeners([]types.StoreKey{testStoreKey1}) - require.Equal(t, 1, len(ms.listeners)) - - require.NoError(t, ms.LoadLatestVersion()) - cacheMulti := ms.CacheMultiStore() - - store := cacheMulti.GetKVStore(testStoreKey1) - 
store.Set([]byte{1}, []byte{1}) - require.Empty(t, ms.PopStateCache()) - - // writes are observed when cache store commit. - cacheMulti.Write() - require.Equal(t, 1, len(ms.PopStateCache())) - - // test no listening on unobserved store - store = cacheMulti.GetKVStore(testStoreKey2) - store.Set([]byte{1}, []byte{1}) - require.Empty(t, ms.PopStateCache()) - - // writes are not observed when cache store commit - cacheMulti.Write() - require.Empty(t, ms.PopStateCache()) -} - -type commitKVStoreStub struct { - types.CommitKVStore - Committed int -} - -func (stub *commitKVStoreStub) Commit() types.CommitID { - commitID := stub.CommitKVStore.Commit() - stub.Committed++ - return commitID -} - -func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) { - var db dbm.DB = dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) - if err := store.LoadLatestVersion(); err != nil { - return nil, err - } - return map[types.StoreKey]types.CommitKVStore{ - testStoreKey1: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore), - }, - testStoreKey2: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore), - }, - testStoreKey3: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore), - }, - }, nil -} - -func TestCommitStores(t *testing.T) { - testCases := []struct { - name string - committed int - exptectCommit int - }{ - { - "when upgrade not get interrupted", - 0, - 1, - }, - { - "when upgrade get interrupted once", - 1, - 0, - }, - { - "when upgrade get interrupted twice", - 2, - 0, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - storeMap, err := prepareStoreMap() - require.NoError(t, err) - store := storeMap[testStoreKey1].(*commitKVStoreStub) - for i := tc.committed; i > 0; i-- { - store.Commit() - } - store.Committed = 0 - var version int64 = 1 - removalMap := map[types.StoreKey]bool{} - res := commitStores(version, storeMap, removalMap) - for _, s := range res.StoreInfos { - require.Equal(t, version, s.CommitId.Version) - } - require.Equal(t, version, res.Version) - require.Equal(t, tc.exptectCommit, store.Committed) - }) - } -} diff --git a/store/snapshots/chunk.go b/store/snapshots/chunk.go index fdf8cbd4b9..c70fc074b0 100644 --- a/store/snapshots/chunk.go +++ b/store/snapshots/chunk.go @@ -5,8 +5,8 @@ import ( "math" "cosmossdk.io/errors" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" + "cosmossdk.io/store/v2" + snapshottypes "cosmossdk.io/store/v2/snapshots/types" ) // ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a @@ -72,7 +72,7 @@ func (w *ChunkWriter) CloseWithError(err error) { // Write implements io.Writer. 
func (w *ChunkWriter) Write(data []byte) (int, error) { if w.closed { - return 0, errors.Wrap(storetypes.ErrLogic, "cannot write to closed ChunkWriter") + return 0, errors.Wrap(store.ErrLogic, "cannot write to closed ChunkWriter") } nTotal := 0 for len(data) > 0 { @@ -174,7 +174,7 @@ func ValidRestoreHeight(format uint32, height uint64) error { } if height == 0 { - return errors.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0") + return errors.Wrap(store.ErrLogic, "cannot restore snapshot at height 0") } if height > uint64(math.MaxInt64) { return errors.Wrapf(snapshottypes.ErrInvalidMetadata, diff --git a/store/snapshots/chunk_test.go b/store/snapshots/chunk_test.go index df524cdf3c..2cf00eef27 100644 --- a/store/snapshots/chunk_test.go +++ b/store/snapshots/chunk_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "cosmossdk.io/store/snapshots" + "cosmossdk.io/store/v2/snapshots" ) func TestChunkWriter(t *testing.T) { diff --git a/store/snapshots/helpers_test.go b/store/snapshots/helpers_test.go index d337b53ab7..7c6cf04bcd 100644 --- a/store/snapshots/helpers_test.go +++ b/store/snapshots/helpers_test.go @@ -17,9 +17,8 @@ import ( errorsmod "cosmossdk.io/errors" "cosmossdk.io/log" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/types" + "cosmossdk.io/store/v2/snapshots" + snapshottypes "cosmossdk.io/store/v2/snapshots/types" ) func checksums(slice [][]byte) [][]byte { @@ -302,7 +301,7 @@ func (s *extSnapshotter) SupportedFormats() []uint32 { func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error { for _, i := range s.state { - if err := payloadWriter(types.Uint64ToBigEndian(i)); err != nil { + if err := payloadWriter(snapshottypes.Uint64ToBigEndian(i)); err != nil { return err } } @@ -317,7 +316,7 @@ func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadR } else if err != nil { return err } - s.state = append(s.state, types.BigEndianToUint64(payload)) + s.state = append(s.state, snapshottypes.BigEndianToUint64(payload)) } // finalize restoration return nil diff --git a/store/snapshots/manager.go b/store/snapshots/manager.go index bd5b9878d7..8dd1381e1e 100644 --- a/store/snapshots/manager.go +++ b/store/snapshots/manager.go @@ -13,8 +13,8 @@ import ( errorsmod "cosmossdk.io/errors" "cosmossdk.io/log" - "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/snapshots/types" ) // Manager manages snapshot and restore operations for an app, making sure only a single @@ -112,10 +112,10 @@ func (m *Manager) begin(op operation) error { // beginLocked begins an operation while already holding the mutex. func (m *Manager) beginLocked(op operation) error { if op == opNone { - return errorsmod.Wrap(storetypes.ErrLogic, "can't begin a none operation") + return errorsmod.Wrap(store.ErrLogic, "can't begin a none operation") } if m.operation != opNone { - return errorsmod.Wrapf(storetypes.ErrConflict, "a %v operation is in progress", m.operation) + return errorsmod.Wrapf(store.ErrConflict, "a %v operation is in progress", m.operation) } m.operation = op return nil @@ -161,7 +161,7 @@ func (m *Manager) GetSnapshotBlockRetentionHeights() int64 { // Create creates a snapshot and returns its metadata. 
func (m *Manager) Create(height uint64) (*types.Snapshot, error) { if m == nil { - return nil, errorsmod.Wrap(storetypes.ErrLogic, "no snapshot store configured") + return nil, errorsmod.Wrap(store.ErrLogic, "no snapshot store configured") } defer m.multistore.PruneSnapshotHeight(int64(height)) @@ -177,7 +177,7 @@ func (m *Manager) Create(height uint64) (*types.Snapshot, error) { return nil, errorsmod.Wrap(err, "failed to examine latest snapshot") } if latest != nil && latest.Height >= height { - return nil, errorsmod.Wrapf(storetypes.ErrConflict, + return nil, errorsmod.Wrapf(store.ErrConflict, "a more recent snapshot already exists at height %v", latest.Height) } @@ -279,7 +279,7 @@ func (m *Manager) Restore(snapshot types.Snapshot) error { return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format) } if snapshot.Height == 0 { - return errorsmod.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0") + return errorsmod.Wrap(store.ErrLogic, "cannot restore snapshot at height 0") } if snapshot.Height > uint64(math.MaxInt64) { return errorsmod.Wrapf(types.ErrInvalidMetadata, @@ -375,11 +375,11 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io. } metadata := nextItem.GetExtension() if metadata == nil { - return errorsmod.Wrapf(storetypes.ErrLogic, "unknown snapshot item %T", nextItem.Item) + return errorsmod.Wrapf(store.ErrLogic, "unknown snapshot item %T", nextItem.Item) } extension, ok := m.extensions[metadata.Name] if !ok { - return errorsmod.Wrapf(storetypes.ErrLogic, "unknown extension snapshotter %s", metadata.Name) + return errorsmod.Wrapf(store.ErrLogic, "unknown extension snapshotter %s", metadata.Name) } if !IsFormatSupported(extension, metadata.Format) { return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name) @@ -402,11 +402,11 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) { m.mtx.Lock() defer m.mtx.Unlock() if m.operation != opRestore { - return false, errorsmod.Wrap(storetypes.ErrLogic, "no restore operation in progress") + return false, errorsmod.Wrap(store.ErrLogic, "no restore operation in progress") } if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) { - return false, errorsmod.Wrap(storetypes.ErrLogic, "received unexpected chunk") + return false, errorsmod.Wrap(store.ErrLogic, "received unexpected chunk") } // Check if any errors have occurred yet. 
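(Aside: a rough usage-level sketch of how a caller might drive the two-step restore API touched by these hunks, using the v2 import paths this patch introduces; the package name, restoreFromChunks, and the chunk channel are hypothetical.)

package restoresketch

import (
	"cosmossdk.io/store/v2/snapshots"
	"cosmossdk.io/store/v2/snapshots/types"
)

// restoreFromChunks registers the target snapshot with Restore, then feeds
// chunk payloads through RestoreChunk until it reports completion.
func restoreFromChunks(m *snapshots.Manager, snapshot types.Snapshot, chunks <-chan []byte) error {
	if err := m.Restore(snapshot); err != nil {
		return err
	}

	for chunk := range chunks {
		done, err := m.RestoreChunk(chunk)
		if err != nil {
			return err
		}
		if done {
			// The final expected chunk was applied; the snapshot is fully restored.
			return nil
		}
	}

	return nil
}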
@@ -416,7 +416,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) { if done.err != nil { return false, done.err } - return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended unexpectedly") + return false, errorsmod.Wrap(store.ErrLogic, "restore ended unexpectedly") default: } @@ -452,7 +452,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) { return false, done.err } if !done.complete { - return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended prematurely") + return false, errorsmod.Wrap(store.ErrLogic, "restore ended prematurely") } return true, nil diff --git a/store/snapshots/manager_test.go b/store/snapshots/manager_test.go index 49f31e8627..c3276d01ed 100644 --- a/store/snapshots/manager_test.go +++ b/store/snapshots/manager_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/require" "cosmossdk.io/log" - "cosmossdk.io/store/snapshots" - "cosmossdk.io/store/snapshots/types" + "cosmossdk.io/store/v2/snapshots" + "cosmossdk.io/store/v2/snapshots/types" ) var opts = types.NewSnapshotOptions(1500, 2) diff --git a/store/snapshots/store.go b/store/snapshots/store.go index 2f08a6e6c4..4d202cb2d5 100644 --- a/store/snapshots/store.go +++ b/store/snapshots/store.go @@ -15,8 +15,8 @@ import ( "github.com/cosmos/gogoproto/proto" "cosmossdk.io/errors" - "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/snapshots/types" ) const ( @@ -36,7 +36,7 @@ type Store struct { // NewStore creates a new snapshot store. func NewStore(db db.DB, dir string) (*Store, error) { if dir == "" { - return nil, errors.Wrap(storetypes.ErrLogic, "snapshot directory not given") + return nil, errors.Wrap(store.ErrLogic, "snapshot directory not given") } err := os.MkdirAll(dir, 0o755) if err != nil { @@ -56,7 +56,7 @@ func (s *Store) Delete(height uint64, format uint32) error { saving := s.saving[height] s.mtx.Unlock() if saving { - return errors.Wrapf(storetypes.ErrConflict, + return errors.Wrapf(store.ErrConflict, "snapshot for height %v format %v is currently being saved", height, format) } err := s.db.DeleteSync(encodeKey(height, format)) @@ -227,7 +227,7 @@ func (s *Store) Save( ) (*types.Snapshot, error) { defer DrainChunks(chunks) if height == 0 { - return nil, errors.Wrap(storetypes.ErrLogic, "snapshot height cannot be 0") + return nil, errors.Wrap(store.ErrLogic, "snapshot height cannot be 0") } s.mtx.Lock() @@ -235,7 +235,7 @@ func (s *Store) Save( s.saving[height] = true s.mtx.Unlock() if saving { - return nil, errors.Wrapf(storetypes.ErrConflict, + return nil, errors.Wrapf(store.ErrConflict, "a snapshot for height %v is already being saved", height) } defer func() { @@ -249,7 +249,7 @@ func (s *Store) Save( return nil, err } if exists { - return nil, errors.Wrapf(storetypes.ErrConflict, + return nil, errors.Wrapf(store.ErrConflict, "snapshot already exists for height %v format %v", height, format) } @@ -349,11 +349,12 @@ func (s *Store) PathChunk(height uint64, format, chunk uint32) string { // decodeKey decodes a snapshot key. 
func decodeKey(k []byte) (uint64, uint32, error) { if len(k) != 13 { - return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key with length %v", len(k)) + return 0, 0, errors.Wrapf(store.ErrLogic, "invalid snapshot key with length %v", len(k)) } if k[0] != keyPrefixSnapshot { - return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key prefix %x", k[0]) + return 0, 0, errors.Wrapf(store.ErrLogic, "invalid snapshot key prefix %x", k[0]) } + height := binary.BigEndian.Uint64(k[1:9]) format := binary.BigEndian.Uint32(k[9:13]) return height, format, nil diff --git a/store/snapshots/store_test.go b/store/snapshots/store_test.go index f4ff0ef74d..b202807cb6 100644 --- a/store/snapshots/store_test.go +++ b/store/snapshots/store_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "cosmossdk.io/store/snapshots" - "cosmossdk.io/store/snapshots/types" + "cosmossdk.io/store/v2/snapshots" + "cosmossdk.io/store/v2/snapshots/types" ) func setupStore(t *testing.T) *snapshots.Store { diff --git a/store/snapshots/types/util.go b/store/snapshots/types/util.go index 861647088b..4ffeb73375 100644 --- a/store/snapshots/types/util.go +++ b/store/snapshots/types/util.go @@ -1,6 +1,8 @@ package types import ( + "encoding/binary" + protoio "github.com/cosmos/gogoproto/io" ) @@ -14,3 +16,20 @@ func WriteExtensionPayload(protoWriter protoio.Writer, payload []byte) error { }, }) } + +// Uint64ToBigEndian - marshals uint64 to a big endian byte slice so it can be sorted +func Uint64ToBigEndian(i uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, i) + return b +} + +// BigEndianToUint64 returns an uint64 from big endian encoded bytes. If encoding +// is empty, zero is returned. +func BigEndianToUint64(bz []byte) uint64 { + if len(bz) == 0 { + return 0 + } + + return binary.BigEndian.Uint64(bz) +} diff --git a/store/storage/README.md b/store/storage/README.md new file mode 100644 index 0000000000..c82c3f3a2f --- /dev/null +++ b/store/storage/README.md @@ -0,0 +1,3 @@ +# State Storage (SS) + +TODO diff --git a/store/storage/pebbledb/batch.go b/store/storage/pebbledb/batch.go new file mode 100644 index 0000000000..88a7ec99c3 --- /dev/null +++ b/store/storage/pebbledb/batch.go @@ -0,0 +1,71 @@ +package pebbledb + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/cockroachdb/pebble" + + "cosmossdk.io/store/v2" +) + +var _ store.Batch = (*Batch)(nil) + +type Batch struct { + storage *pebble.DB + batch *pebble.Batch + version uint64 +} + +func NewBatch(storage *pebble.DB, version uint64) (*Batch, error) { + var versionBz [VersionSize]byte + binary.LittleEndian.PutUint64(versionBz[:], version) + + batch := storage.NewBatch() + + if err := batch.Set([]byte(latestVersionKey), versionBz[:], nil); err != nil { + return nil, fmt.Errorf("failed to write PebbleDB batch: %w", err) + } + + return &Batch{ + storage: storage, + batch: batch, + version: version, + }, nil +} + +func (b *Batch) Size() int { + return b.batch.Len() +} + +func (b *Batch) Reset() { + b.batch.Reset() +} + +func (b *Batch) set(storeKey string, tombstone uint64, key, value []byte) error { + prefixedKey := MVCCEncode(prependStoreKey(storeKey, key), b.version) + prefixedVal := MVCCEncode(value, tombstone) + + if err := b.batch.Set(prefixedKey, prefixedVal, nil); err != nil { + return fmt.Errorf("failed to write PebbleDB batch: %w", err) + } + + return nil +} + +func (b *Batch) Set(storeKey string, key, value []byte) error { + return b.set(storeKey, 0, 
key, value) +} + +func (b *Batch) Delete(storeKey string, key []byte) error { + return b.set(storeKey, b.version, key, []byte(tombstoneVal)) +} + +func (b *Batch) Write() (err error) { + defer func() { + err = errors.Join(err, b.batch.Close()) + }() + + return b.batch.Commit(defaultWriteOpts) +} diff --git a/store/storage/pebbledb/comparator.go b/store/storage/pebbledb/comparator.go new file mode 100644 index 0000000000..b6f5aef24a --- /dev/null +++ b/store/storage/pebbledb/comparator.go @@ -0,0 +1,235 @@ +package pebbledb + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/cockroachdb/pebble" +) + +// MVCCComparer returns a PebbleDB Comparer with encoding and decoding routines +// for MVCC control, used to compare and store versioned keys. +// +// Note: This Comparer implementation is largely based on PebbleDB's internal +// MVCC example, which can be found here: +// https://github.com/cockroachdb/pebble/blob/master/cmd/pebble/mvcc.go +var MVCCComparer = &pebble.Comparer{ + Name: "ss_pebbledb_comparator", + + Compare: MVCCKeyCompare, + + AbbreviatedKey: func(k []byte) uint64 { + key, _, ok := SplitMVCCKey(k) + if !ok { + return 0 + } + + return pebble.DefaultComparer.AbbreviatedKey(key) + }, + + Equal: func(a, b []byte) bool { + return MVCCKeyCompare(a, b) == 0 + }, + + Separator: func(dst, a, b []byte) []byte { + aKey, _, ok := SplitMVCCKey(a) + if !ok { + return append(dst, a...) + } + + bKey, _, ok := SplitMVCCKey(b) + if !ok { + return append(dst, a...) + } + + // if the keys are the same just return a + if bytes.Equal(aKey, bKey) { + return append(dst, a...) + } + + n := len(dst) + + // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the + // same semantics as pebble.DefaultComparer, so reuse the latter's Separator + // implementation. + dst = pebble.DefaultComparer.Separator(dst, aKey, bKey) + + // Did we pick a separator different than aKey? If we did not, we can't do + // better than a. + buf := dst[n:] + if bytes.Equal(aKey, buf) { + return append(dst[:n], a...) + } + + // The separator is > aKey, so we only need to add the timestamp sentinel. + return append(dst, 0) + }, + + ImmediateSuccessor: func(dst, a []byte) []byte { + // The key `a` is guaranteed to be a bare prefix: It's a key without a version + // — just a trailing 0-byte to signify the length of the version. For example + // the user key "foo" is encoded as: "foo\0". We need to encode the immediate + // successor to "foo", which in the natural byte ordering is "foo\0". Append + // a single additional zero, to encode the user key "foo\0" with a zero-length + // version. + return append(append(dst, a...), 0) + }, + + Successor: func(dst, a []byte) []byte { + aKey, _, ok := SplitMVCCKey(a) + if !ok { + return append(dst, a...) + } + + n := len(dst) + + // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the + // same semantics as pebble.DefaultComparer, so reuse the latter's Successor + // implementation. + dst = pebble.DefaultComparer.Successor(dst, aKey) + + // Did we pick a successor different than aKey? If we did not, we can't do + // better than a. + buf := dst[n:] + if bytes.Equal(aKey, buf) { + return append(dst[:n], a...) + } + + // The successor is > aKey, so we only need to add the timestamp sentinel. 
+ return append(dst, 0) + }, + + FormatKey: func(k []byte) fmt.Formatter { + return mvccKeyFormatter{key: k} + }, + + Split: func(k []byte) int { + key, _, ok := SplitMVCCKey(k) + if !ok { + return len(k) + } + + // This matches the behavior of libroach/KeyPrefix. RocksDB requires that + // keys generated via a SliceTransform be comparable with normal encoded + // MVCC keys. Encoded MVCC keys have a suffix indicating the number of + // bytes of timestamp data. MVCC keys without a timestamp have a suffix of + // 0. We're careful in EncodeKey to make sure that the user-key always has + // a trailing 0. If there is no timestamp this falls out naturally. If + // there is a timestamp we prepend a 0 to the encoded timestamp data. + return len(key) + 1 + }, +} + +type mvccKeyFormatter struct { + key []byte +} + +func (f mvccKeyFormatter) Format(s fmt.State, verb rune) { + k, vBz, ok := SplitMVCCKey(f.key) + if ok { + v, _ := decodeUint64Ascending(vBz) + fmt.Fprintf(s, "%s/%d", k, v) + } else { + fmt.Fprintf(s, "%s", f.key) + } +} + +// SplitMVCCKey accepts an MVCC key and returns the "user" key, the MVCC version, +// and a boolean indicating if the provided key is an MVCC key. +func SplitMVCCKey(mvccKey []byte) (key, version []byte, ok bool) { + if len(mvccKey) == 0 { + return nil, nil, false + } + + n := len(mvccKey) - 1 + tsLen := int(mvccKey[n]) + if n < tsLen { + return nil, nil, false + } + + key = mvccKey[:n-tsLen] + if tsLen > 0 { + version = mvccKey[n-tsLen+1 : len(mvccKey)-1] + } + + return key, version, true +} + +// MVCCKeyCompare compares two MVCC keys. +func MVCCKeyCompare(a, b []byte) int { + aEnd := len(a) - 1 + bEnd := len(b) - 1 + if aEnd < 0 || bEnd < 0 { + // This should never happen unless there is some sort of corruption of + // the keys. This is a little bizarre, but the behavior exactly matches + // engine/db.cc:DBComparator. + return bytes.Compare(a, b) + } + + // Compute the index of the separator between the key and the timestamp. + aSep := aEnd - int(a[aEnd]) + bSep := bEnd - int(b[bEnd]) + if aSep < 0 || bSep < 0 { + // This should never happen unless there is some sort of corruption of + // the keys. This is a little bizarre, but the behavior exactly matches + // engine/db.cc:DBComparator. + return bytes.Compare(a, b) + } + + // compare the "user key" part of the key + if c := bytes.Compare(a[:aSep], b[:bSep]); c != 0 { + return c + } + + // compare the timestamp part of the key + aTS := a[aSep:aEnd] + bTS := b[bSep:bEnd] + if len(aTS) == 0 { + if len(bTS) == 0 { + return 0 + } + return -1 + } else if len(bTS) == 0 { + return 1 + } + + return bytes.Compare(aTS, bTS) +} + +// \x00[]<#version-bytes> +func MVCCEncode(key []byte, version uint64) (dst []byte) { + dst = append(dst, key...) + dst = append(dst, 0) + + if version != 0 { + extra := byte(1 + 8) + dst = encodeUint64Ascending(dst, version) + dst = append(dst, extra) + } + + return dst +} + +// encodeUint64Ascending encodes the uint64 value using a big-endian 8 byte +// representation. The bytes are appended to the supplied buffer and +// the final buffer is returned. +func encodeUint64Ascending(dst []byte, v uint64) []byte { + return append( + dst, + byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), + byte(v>>24), byte(v>>16), byte(v>>8), byte(v), + ) +} + +// decodeUint64Ascending decodes a uint64 from the input buffer, treating +// the input as a big-endian 8 byte uint64 representation. The decoded uint64 is +// returned. 
+func decodeUint64Ascending(b []byte) (uint64, error) { + if len(b) < 8 { + return 0, fmt.Errorf("insufficient bytes to decode uint64 int value; expected 8; got %d", len(b)) + } + + v := binary.BigEndian.Uint64(b) + return v, nil +} diff --git a/store/storage/pebbledb/comparator_test.go b/store/storage/pebbledb/comparator_test.go new file mode 100644 index 0000000000..1affd81b40 --- /dev/null +++ b/store/storage/pebbledb/comparator_test.go @@ -0,0 +1,58 @@ +package pebbledb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMVCCKey(t *testing.T) { + for i := uint64(1); i < 1001; i++ { + keyA := MVCCEncode([]byte("key001"), i) + + key, vBz, ok := SplitMVCCKey(keyA) + + version, err := decodeUint64Ascending(vBz) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, i, version) + require.Equal(t, []byte("key001"), key) + } +} + +func TestMVCCKeyCompare(t *testing.T) { + testCases := []struct { + keyA []byte + keyB []byte + expected int + }{ + { + // same key, same version + keyA: MVCCEncode([]byte("key001"), 1), + keyB: MVCCEncode([]byte("key001"), 1), + expected: 0, + }, + { + // same key, different version + keyA: MVCCEncode([]byte("key001"), 1), + keyB: MVCCEncode([]byte("key001"), 2), + expected: -1, + }, + { + // same key, different version (inverse) + keyA: MVCCEncode([]byte("key001"), 2), + keyB: MVCCEncode([]byte("key001"), 1), + expected: 1, + }, + { + // different key, same version + keyA: MVCCEncode([]byte("key001"), 1), + keyB: MVCCEncode([]byte("key009"), 1), + expected: -1, + }, + } + + for _, tc := range testCases { + require.Equalf(t, tc.expected, MVCCKeyCompare(tc.keyA, tc.keyB), "keyA: %s, keyB: %s", tc.keyA, tc.keyB) + } +} diff --git a/store/storage/pebbledb/db.go b/store/storage/pebbledb/db.go new file mode 100644 index 0000000000..910a9d347e --- /dev/null +++ b/store/storage/pebbledb/db.go @@ -0,0 +1,235 @@ +package pebbledb + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "slices" + + "github.com/cockroachdb/pebble" + + "cosmossdk.io/store/v2" +) + +const ( + VersionSize = 8 + + StorePrefixTpl = "s/k:%s/" // s/k: + latestVersionKey = "s/_latest" // NB: latestVersionKey key must be lexically smaller than StorePrefixTpl + tombstoneVal = "TOMBSTONE" +) + +var ( + _ store.VersionedDatabase = (*Database)(nil) + + defaultWriteOpts = pebble.Sync +) + +type Database struct { + storage *pebble.DB +} + +func New(dataDir string) (*Database, error) { + opts := &pebble.Options{ + Comparer: MVCCComparer, + } + opts = opts.EnsureDefaults() + + db, err := pebble.Open(dataDir, opts) + if err != nil { + return nil, fmt.Errorf("failed to open PebbleDB: %w", err) + } + + return &Database{ + storage: db, + }, nil +} + +func NewWithDB(storage *pebble.DB) *Database { + return &Database{ + storage: storage, + } +} + +func (db *Database) Close() error { + err := db.storage.Close() + db.storage = nil + return err +} + +func (db *Database) SetLatestVersion(version uint64) error { + var ts [VersionSize]byte + binary.LittleEndian.PutUint64(ts[:], version) + return db.storage.Set([]byte(latestVersionKey), ts[:], defaultWriteOpts) +} + +func (db *Database) GetLatestVersion() (uint64, error) { + bz, closer, err := db.storage.Get([]byte(latestVersionKey)) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + // in case of a fresh database + return 0, nil + } + + return 0, err + } + + if len(bz) == 0 { + return 0, closer.Close() + } + + return binary.LittleEndian.Uint64(bz), closer.Close() +} + +func (db *Database) Has(storeKey 
string, version uint64, key []byte) (bool, error) { + val, err := db.Get(storeKey, version, key) + if err != nil { + return false, err + } + + return val != nil, nil +} + +func (db *Database) Get(storeKey string, targetVersion uint64, key []byte) ([]byte, error) { + prefixedVal, err := getMVCCSlice(db.storage, storeKey, key, targetVersion) + if err != nil { + if errors.Is(err, store.ErrRecordNotFound) { + return nil, nil + } + + return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) + } + + valBz, tombBz, ok := SplitMVCCKey(prefixedVal) + if !ok { + return nil, fmt.Errorf("invalid PebbleDB MVCC value: %s", prefixedVal) + } + + // A tombstone of zero or a target version that is less than the tombstone + // version means the key is not deleted at the target version. + if len(tombBz) == 0 { + return valBz, nil + } + + tombstone, err := decodeUint64Ascending(tombBz) + if err != nil { + return nil, fmt.Errorf("failed to decode value tombstone: %w", err) + } + if tombstone > targetVersion { + return nil, fmt.Errorf("value tombstone too large: %d", tombstone) + } + + // A tombstone of zero or a target version that is less than the tombstone + // version means the key is not deleted at the target version. + if targetVersion < tombstone { + return valBz, nil + } + + // the value is considered deleted + return nil, nil +} + +func (db *Database) ApplyChangeset(version uint64, cs *store.Changeset) error { + b, err := NewBatch(db.storage, version) + if err != nil { + return err + } + + for _, kvPair := range cs.Pairs { + if kvPair.Value == nil { + if err := b.Delete(kvPair.StoreKey, kvPair.Key); err != nil { + return err + } + } else { + if err := b.Set(kvPair.StoreKey, kvPair.Key, kvPair.Value); err != nil { + return err + } + } + } + + return b.Write() +} + +// Prune for the PebbleDB SS backend is currently not supported. It seems the only +// reliable way to prune is to iterate over the desired domain and either manually +// tombstone or delete. Either way, the operation would be timely. +// +// See: https://github.com/cockroachdb/cockroach/blob/33623e3ee420174a4fd3226d1284b03f0e3caaac/pkg/storage/mvcc.go#L3182 +func (db *Database) Prune(version uint64) error { + panic("not implemented!") +} + +func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, store.ErrKeyEmpty + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + return nil, store.ErrStartAfterEnd + } + + lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) + + var upperBound []byte + if end != nil { + upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) + } + + itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) + if err != nil { + return nil, fmt.Errorf("failed to create PebbleDB iterator: %w", err) + } + + return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version), nil +} + +func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + panic("not implemented!") +} + +func storePrefix(storeKey string) []byte { + return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) +} + +func prependStoreKey(storeKey string, key []byte) []byte { + return append(storePrefix(storeKey), key...) 
+} + +func getMVCCSlice(db *pebble.DB, storeKey string, key []byte, version uint64) ([]byte, error) { + // end domain is exclusive, so we need to increment the version by 1 + if version < math.MaxUint64 { + version++ + } + + itr, err := db.NewIter(&pebble.IterOptions{ + LowerBound: MVCCEncode(prependStoreKey(storeKey, key), 0), + UpperBound: MVCCEncode(prependStoreKey(storeKey, key), version), + }) + if err != nil { + return nil, fmt.Errorf("failed to create PebbleDB iterator: %w", err) + } + defer func() { + err = errors.Join(err, itr.Close()) + }() + + if !itr.Last() { + return nil, store.ErrRecordNotFound + } + + _, vBz, ok := SplitMVCCKey(itr.Key()) + if !ok { + return nil, fmt.Errorf("invalid PebbleDB MVCC key: %s", itr.Key()) + } + + keyVersion, err := decodeUint64Ascending(vBz) + if err != nil { + return nil, fmt.Errorf("failed to decode key version: %w", err) + } + if keyVersion > version { + return nil, fmt.Errorf("key version too large: %d", keyVersion) + } + + return slices.Clone(itr.Value()), nil +} diff --git a/store/storage/pebbledb/db_test.go b/store/storage/pebbledb/db_test.go new file mode 100644 index 0000000000..43178c209e --- /dev/null +++ b/store/storage/pebbledb/db_test.go @@ -0,0 +1,36 @@ +package pebbledb + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/storage" +) + +const ( + storeKey1 = "store1" +) + +func TestStorageTestSuite(t *testing.T) { + s := &storage.StorageTestSuite{ + NewDB: func(dir string) (store.VersionedDatabase, error) { + return New(dir) + }, + EmptyBatchSize: 12, + SkipTests: []string{ + "TestStorageTestSuite/TestDatabase_Prune", + }, + } + suite.Run(t, s) +} + +func TestDatabase_ReverseIterator(t *testing.T) { + db, err := New(t.TempDir()) + require.NoError(t, err) + defer db.Close() + + require.Panics(t, func() { _, _ = db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) }) +} diff --git a/store/storage/pebbledb/iterator.go b/store/storage/pebbledb/iterator.go new file mode 100644 index 0000000000..9c301bf840 --- /dev/null +++ b/store/storage/pebbledb/iterator.go @@ -0,0 +1,256 @@ +package pebbledb + +import ( + "bytes" + "fmt" + "slices" + + "github.com/cockroachdb/pebble" + + "cosmossdk.io/store/v2" +) + +var _ store.Iterator = (*iterator)(nil) + +// iterator implements the store.Iterator interface. It wraps a PebbleDB iterator +// with added MVCC key handling logic. The iterator will iterate over the key space +// in the provided domain for a given version. If a key has been written at the +// provided version, that key/value pair will be iterated over. Otherwise, the +// latest version for that key/value pair will be iterated over s.t. it's less +// than the provided version. Note: +// +// - The start key must not be empty. +// - Currently, reverse iteration is NOT supported. +type iterator struct { + source *pebble.Iterator + prefix, start, end []byte + version uint64 + valid bool +} + +func newPebbleDBIterator(src *pebble.Iterator, prefix, mvccStart, mvccEnd []byte, version uint64) *iterator { + // move the underlying PebbleDB iterator to the first key + valid := src.First() + if valid { + // The first key may not represent the desired target version, so move the + // cursor to the correct location. + firstKey, _, ok := SplitMVCCKey(src.Key()) + if !ok { + // XXX: This should not happen as that would indicate we have a malformed + // MVCC key. 
+ valid = false + } else { + valid = src.SeekLT(MVCCEncode(firstKey, version+1)) + } + } + + itr := &iterator{ + source: src, + prefix: prefix, + start: mvccStart, + end: mvccEnd, + version: version, + valid: valid, + } + + // The cursor might now be pointing at a key/value pair that is tombstoned. + // If so, we must move the cursor. + if itr.valid && itr.cursorTombstoned() { + itr.valid = itr.Next() + } + + return itr +} + +// Domain returns the domain of the iterator. The caller must not modify the +// return values. +func (itr *iterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr *iterator) Key() []byte { + itr.assertIsValid() + + key, _, ok := SplitMVCCKey(itr.source.Key()) + if !ok { + // XXX: This should not happen as that would indicate we have a malformed + // MVCC key. + panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) + } + + keyCopy := slices.Clone(key) + return keyCopy[len(itr.prefix):] +} + +func (itr *iterator) Value() []byte { + itr.assertIsValid() + + val, _, ok := SplitMVCCKey(itr.source.Value()) + if !ok { + // XXX: This should not happen as that would indicate we have a malformed + // MVCC value. + panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) + } + + return slices.Clone(val) +} + +func (itr *iterator) Next() bool { + // First move the iterator to the next prefix, which may not correspond to the + // desired version for that key, e.g. if the key was written at a later version, + // so we seek back to the latest desired version, s.t. the version is <= itr.version. + if itr.source.NextPrefix() { + nextKey, _, ok := SplitMVCCKey(itr.source.Key()) + if !ok { + // XXX: This should not happen as that would indicate we have a malformed + // MVCC key. + itr.valid = false + return itr.valid + } + if !bytes.HasPrefix(nextKey, itr.prefix) { + // the next key must have itr.prefix as the prefix + itr.valid = false + return itr.valid + } + + // Move the iterator to the closest version to the desired version, so we + // append the current iterator key to the prefix and seek to that key. + itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) + + // The cursor might now be pointing at a key/value pair that is tombstoned. + // If so, we must move the cursor. + if itr.valid && itr.cursorTombstoned() { + itr.valid = itr.Next() + } + + return itr.valid + } + + itr.valid = false + return itr.valid +} + +func (itr *iterator) Valid() bool { + // once invalid, forever invalid + if !itr.valid || !itr.source.Valid() { + itr.valid = false + return itr.valid + } + + // if source has error, consider it invalid + if err := itr.source.Error(); err != nil { + itr.valid = false + return itr.valid + } + + // if key is at the end or past it, consider it invalid + if end := itr.end; end != nil { + if bytes.Compare(end, itr.Key()) <= 0 { + itr.valid = false + return itr.valid + } + } + + return true +} + +func (itr *iterator) Error() error { + return itr.source.Error() +} + +func (itr *iterator) Close() { + _ = itr.source.Close() + itr.source = nil + itr.valid = false +} + +func (itr *iterator) assertIsValid() { + if !itr.valid { + panic("iterator is invalid") + } +} + +// cursorTombstoned checks if the current cursor is pointing at a key/value pair +// that is tombstoned. If the cursor is tombstoned, is returned, otherwise +// is returned. In the case where the iterator is valid but the key/value +// pair is tombstoned, the caller should call Next(). 
Note, this method assumes +// the caller assures the iterator is valid first! +func (itr *iterator) cursorTombstoned() bool { + _, tombBz, ok := SplitMVCCKey(itr.source.Value()) + if !ok { + // XXX: This should not happen as that would indicate we have a malformed + // MVCC value. + panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) + } + + // If the tombstone suffix is empty, we consider this a zero value and thus it + // is not tombstoned. + if len(tombBz) == 0 { + return false + } + + // If the tombstone suffix is non-empty and greater than the target version, + // the value is not tombstoned. + tombstone, err := decodeUint64Ascending(tombBz) + if err != nil { + panic(fmt.Errorf("failed to decode value tombstone: %w", err)) + } + if tombstone > itr.version { + return false + } + + return true +} + +func (itr *iterator) DebugRawIterate() { + valid := itr.source.Valid() + if valid { + // The first key may not represent the desired target version, so move the + // cursor to the correct location. + firstKey, _, _ := SplitMVCCKey(itr.source.Key()) + valid = itr.source.SeekLT(MVCCEncode(firstKey, itr.version+1)) + } + + for valid { + key, vBz, ok := SplitMVCCKey(itr.source.Key()) + if !ok { + panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) + } + + version, err := decodeUint64Ascending(vBz) + if err != nil { + panic(fmt.Errorf("failed to decode key version: %w", err)) + } + + val, tombBz, ok := SplitMVCCKey(itr.source.Value()) + if !ok { + panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value())) + } + + var tombstone uint64 + if len(tombBz) > 0 { + tombstone, err = decodeUint64Ascending(vBz) + if err != nil { + panic(fmt.Errorf("failed to decode value tombstone: %w", err)) + } + } + + fmt.Printf("KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", key, val, version, tombstone) + + if itr.source.NextPrefix() { + nextKey, _, ok := SplitMVCCKey(itr.source.Key()) + if !ok { + panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) + } + + // the next key must have itr.prefix as the prefix + if !bytes.HasPrefix(nextKey, itr.prefix) { + valid = false + } else { + valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1)) + } + } else { + valid = false + } + } +} diff --git a/store/storage/rocksdb/batch.go b/store/storage/rocksdb/batch.go new file mode 100644 index 0000000000..79b216b50c --- /dev/null +++ b/store/storage/rocksdb/batch.go @@ -0,0 +1,62 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "encoding/binary" + + "github.com/linxGnu/grocksdb" +) + +type Batch struct { + version uint64 + ts [TimestampSize]byte + storage *grocksdb.DB + cfHandle *grocksdb.ColumnFamilyHandle + batch *grocksdb.WriteBatch +} + +// NewBatch creates a new versioned batch used for batch writes. The caller +// must ensure to call Write() on the returned batch to commit the changes and to +// destroy the batch when done. 
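+//
+// A minimal usage sketch (store key, keys, and version are placeholders):
+//
+//	b := NewBatch(db, 42)
+//	_ = b.Set("store1", []byte("key"), []byte("value"))
+//	_ = b.Delete("store1", []byte("old-key"))
+//	err := b.Write() // commits the batch and the latest-version marker, then destroys it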
+func NewBatch(db *Database, version uint64) Batch { + var ts [TimestampSize]byte + binary.LittleEndian.PutUint64(ts[:], uint64(version)) + + batch := grocksdb.NewWriteBatch() + batch.Put([]byte(latestVersionKey), ts[:]) + + return Batch{ + version: version, + ts: ts, + storage: db.storage, + cfHandle: db.cfHandle, + batch: batch, + } +} + +func (b Batch) Size() int { + return len(b.batch.Data()) +} + +func (b Batch) Reset() { + b.batch.Clear() +} + +func (b Batch) Set(storeKey string, key, value []byte) error { + prefixedKey := prependStoreKey(storeKey, key) + b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value) + return nil +} + +func (b Batch) Delete(storeKey string, key []byte) error { + prefixedKey := prependStoreKey(storeKey, key) + b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:]) + return nil +} + +func (b Batch) Write() error { + defer b.batch.Destroy() + return b.storage.Write(defaultWriteOpts, b.batch) +} diff --git a/store/storage/rocksdb/comparator.go b/store/storage/rocksdb/comparator.go new file mode 100644 index 0000000000..3af816aba4 --- /dev/null +++ b/store/storage/rocksdb/comparator.go @@ -0,0 +1,76 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "bytes" + "encoding/binary" + + "github.com/linxGnu/grocksdb" +) + +// CreateTSComparator should behavior identical with RocksDB builtin timestamp comparator. +// We also use the same builtin comparator name so the builtin tools `ldb`/`sst_dump` +// can work with the database. +func CreateTSComparator() *grocksdb.Comparator { + return grocksdb.NewComparatorWithTimestamp( + "leveldb.BytewiseComparator.u64ts", + TimestampSize, + compare, + compareTS, + compareWithoutTS, + ) +} + +// compareTS compares timestamp as little endian encoded integers. +// +// NOTICE: The behavior must be identical to RocksDB builtin comparator +// "leveldb.BytewiseComparator.u64ts". +func compareTS(bz1 []byte, bz2 []byte) int { + ts1 := binary.LittleEndian.Uint64(bz1) + ts2 := binary.LittleEndian.Uint64(bz2) + + switch { + case ts1 < ts2: + return -1 + + case ts1 > ts2: + return 1 + + default: + return 0 + } +} + +// compare compares two internal keys with timestamp suffix, larger timestamp +// comes first. +// +// NOTICE: The behavior must be identical to RocksDB builtin comparator +// "leveldb.BytewiseComparator.u64ts". +func compare(a []byte, b []byte) int { + ret := compareWithoutTS(a, true, b, true) + if ret != 0 { + return ret + } + + // Compare timestamp. For the same user key with different timestamps, larger + // (newer) timestamp comes first, which means seek operation will try to find + // a version less than or equal to the target version. + return -compareTS(a[len(a)-TimestampSize:], b[len(b)-TimestampSize:]) +} + +// compareWithoutTS compares two internal keys without the timestamp part. +// +// NOTICE: the behavior must be identical to RocksDB builtin comparator +// "leveldb.BytewiseComparator.u64ts". 
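+//
+// The aHasTS/bHasTS flags indicate whether each operand still carries the
+// trailing 8-byte timestamp suffix; RocksDB may hand the comparator keys
+// without a suffix (e.g. iterator bounds), in which case nothing is stripped
+// before the plain bytewise comparison.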
+func compareWithoutTS(a []byte, aHasTS bool, b []byte, bHasTS bool) int { + if aHasTS { + a = a[:len(a)-TimestampSize] + } + if bHasTS { + b = b[:len(b)-TimestampSize] + } + + return bytes.Compare(a, b) +} diff --git a/store/storage/rocksdb/db.go b/store/storage/rocksdb/db.go new file mode 100644 index 0000000000..a0924c9634 --- /dev/null +++ b/store/storage/rocksdb/db.go @@ -0,0 +1,255 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "bytes" + "encoding/binary" + "fmt" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/storage/util" + "github.com/linxGnu/grocksdb" + "golang.org/x/exp/slices" +) + +const ( + TimestampSize = 8 + + StorePrefixTpl = "s/k:%s/" + latestVersionKey = "s/latest" +) + +var ( + _ store.VersionedDatabase = (*Database)(nil) + + defaultWriteOpts = grocksdb.NewDefaultWriteOptions() + defaultReadOpts = grocksdb.NewDefaultReadOptions() +) + +type Database struct { + storage *grocksdb.DB + cfHandle *grocksdb.ColumnFamilyHandle + + // tsLow reflects the full_history_ts_low CF value. Since pruning is done in + // a lazy manner, we use this value to prevent reads for versions that will + // be purged in the next compaction. + tsLow uint64 +} + +func New(dataDir string) (*Database, error) { + storage, cfHandle, err := OpenRocksDB(dataDir) + if err != nil { + return nil, fmt.Errorf("failed to open RocksDB: %w", err) + } + + slice, err := storage.GetFullHistoryTsLow(cfHandle) + if err != nil { + return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) + } + + var tsLow uint64 + tsLowBz := copyAndFreeSlice(slice) + if len(tsLowBz) > 0 { + tsLow = binary.LittleEndian.Uint64(tsLowBz) + } + + return &Database{ + storage: storage, + cfHandle: cfHandle, + tsLow: tsLow, + }, nil +} + +func NewWithDB(storage *grocksdb.DB, cfHandle *grocksdb.ColumnFamilyHandle) (*Database, error) { + slice, err := storage.GetFullHistoryTsLow(cfHandle) + if err != nil { + return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) + } + + var tsLow uint64 + tsLowBz := copyAndFreeSlice(slice) + if len(tsLowBz) > 0 { + tsLow = binary.LittleEndian.Uint64(tsLowBz) + } + return &Database{ + storage: storage, + cfHandle: cfHandle, + tsLow: tsLow, + }, nil +} + +func (db *Database) Close() error { + db.storage.Close() + + db.storage = nil + db.cfHandle = nil + + return nil +} + +func (db *Database) getSlice(storeKey string, version uint64, key []byte) (*grocksdb.Slice, error) { + return db.storage.GetCF( + newTSReadOptions(version), + db.cfHandle, + prependStoreKey(storeKey, key), + ) +} + +func (db *Database) SetLatestVersion(version uint64) error { + var ts [TimestampSize]byte + binary.LittleEndian.PutUint64(ts[:], version) + + return db.storage.Put(defaultWriteOpts, []byte(latestVersionKey), ts[:]) +} + +func (db *Database) GetLatestVersion() (uint64, error) { + bz, err := db.storage.GetBytes(defaultReadOpts, []byte(latestVersionKey)) + if err != nil { + return 0, err + } + + if len(bz) == 0 { + // in case of a fresh database + return 0, nil + } + + return binary.LittleEndian.Uint64(bz), nil +} + +func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, error) { + if version < db.tsLow { + return false, nil + } + + slice, err := db.getSlice(storeKey, version, key) + if err != nil { + return false, err + } + + return slice.Exists(), nil +} + +func (db *Database) Get(storeKey string, version uint64, key []byte) ([]byte, error) { + if version < db.tsLow { + return nil, nil + } + + slice, err := db.getSlice(storeKey, version, key) + if err != 
nil { + return nil, fmt.Errorf("failed to get RocksDB slice: %w", err) + } + + return copyAndFreeSlice(slice), nil +} + +func (db *Database) ApplyChangeset(version uint64, cs *store.Changeset) error { + b := NewBatch(db, version) + + for _, kvPair := range cs.Pairs { + if kvPair.Value == nil { + if err := b.Delete(kvPair.StoreKey, kvPair.Key); err != nil { + return err + } + } else { + if err := b.Set(kvPair.StoreKey, kvPair.Key, kvPair.Value); err != nil { + return err + } + } + } + + return b.Write() +} + +// Prune attempts to prune all versions up to and including the provided version. +// This is done internally by updating the full_history_ts_low RocksDB value on +// the column families, s.t. all versions less than full_history_ts_low will be +// dropped. +// +// Note, this does NOT incur an immediate full compaction, i.e. this performs a +// lazy prune. Future compactions will honor the increased full_history_ts_low +// and trim history when possible. +func (db *Database) Prune(version uint64) error { + tsLow := version + 1 // we increment by 1 to include the provided version + + var ts [TimestampSize]byte + binary.LittleEndian.PutUint64(ts[:], tsLow) + + if err := db.storage.IncreaseFullHistoryTsLow(db.cfHandle, ts[:]); err != nil { + return fmt.Errorf("failed to update column family full_history_ts_low: %w", err) + } + + db.tsLow = tsLow + return nil +} + +func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, store.ErrKeyEmpty + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + return nil, store.ErrStartAfterEnd + } + + prefix := storePrefix(storeKey) + start, end = util.IterateWithPrefix(prefix, start, end) + + itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) + return newRocksDBIterator(itr, prefix, start, end, false), nil +} + +func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, store.ErrKeyEmpty + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + return nil, store.ErrStartAfterEnd + } + + prefix := storePrefix(storeKey) + start, end = util.IterateWithPrefix(prefix, start, end) + + itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle) + return newRocksDBIterator(itr, prefix, start, end, true), nil +} + +// newTSReadOptions returns ReadOptions used in the RocksDB column family read. +func newTSReadOptions(version uint64) *grocksdb.ReadOptions { + var ts [TimestampSize]byte + binary.LittleEndian.PutUint64(ts[:], version) + + readOpts := grocksdb.NewDefaultReadOptions() + readOpts.SetTimestamp(ts[:]) + + return readOpts +} + +func storePrefix(storeKey string) []byte { + return []byte(fmt.Sprintf(StorePrefixTpl, storeKey)) +} + +func prependStoreKey(storeKey string, key []byte) []byte { + return append(storePrefix(storeKey), key...) +} + +// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice does +// not exist, will be returned. 
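+// In other words, a slice that does not exist yields nil; otherwise the data is
+// cloned into Go-managed memory before the underlying RocksDB slice is freed.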
+func copyAndFreeSlice(s *grocksdb.Slice) []byte { + defer s.Free() + if !s.Exists() { + return nil + } + + return slices.Clone(s.Data()) +} + +func readOnlySlice(s *grocksdb.Slice) []byte { + if !s.Exists() { + return nil + } + + return s.Data() +} diff --git a/store/storage/rocksdb/db_test.go b/store/storage/rocksdb/db_test.go new file mode 100644 index 0000000000..0f5a5dad8a --- /dev/null +++ b/store/storage/rocksdb/db_test.go @@ -0,0 +1,91 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "fmt" + "testing" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/storage" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const ( + storeKey1 = "store1" +) + +func TestStorageTestSuite(t *testing.T) { + s := &storage.StorageTestSuite{ + NewDB: func(dir string) (store.VersionedDatabase, error) { + return New(dir) + }, + EmptyBatchSize: 12, + } + suite.Run(t, s) +} + +func TestDatabase_ReverseIterator(t *testing.T) { + db, err := New(t.TempDir()) + require.NoError(t, err) + defer db.Close() + + cs := new(store.Changeset) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + require.NoError(t, db.ApplyChangeset(1, cs)) + + // reverse iterator without an end key + iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) + require.NoError(t, err) + + defer iter.Close() + + i, count := 99, 0 + for ; iter.Valid(); iter.Next() { + require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) + require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) + + i-- + count++ + } + require.Equal(t, 100, count) + require.NoError(t, iter.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + require.False(t, iter.Next()) + require.False(t, iter.Valid()) + + // reverse iterator with with a start and end domain + iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) + require.NoError(t, err) + + defer iter2.Close() + + i, count = 18, 0 + for ; iter2.Valid(); iter2.Next() { + require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) + require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) + + i-- + count++ + } + require.Equal(t, 9, count) + require.NoError(t, iter2.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + require.False(t, iter2.Next()) + require.False(t, iter2.Valid()) + + // start must be <= end + iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) + require.Error(t, err) + require.Nil(t, iter3) +} diff --git a/store/storage/rocksdb/iterator.go b/store/storage/rocksdb/iterator.go new file mode 100644 index 0000000000..268f76d16a --- /dev/null +++ b/store/storage/rocksdb/iterator.go @@ -0,0 +1,154 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "bytes" + + "cosmossdk.io/store/v2" + "github.com/linxGnu/grocksdb" +) + +var _ store.Iterator = (*iterator)(nil) + +type iterator struct { + source *grocksdb.Iterator + prefix, start, end []byte + reverse bool + invalid bool +} + +func newRocksDBIterator(source *grocksdb.Iterator, prefix, start, end []byte, reverse bool) *iterator { + if reverse { + if end == nil { + source.SeekToLast() + } else { + source.Seek(end) + + if source.Valid() { + eoaKey := readOnlySlice(source.Key()) // end or after 
key + if bytes.Compare(end, eoaKey) <= 0 { + source.Prev() + } + } else { + source.SeekToLast() + } + } + } else { + if start == nil { + source.SeekToFirst() + } else { + source.Seek(start) + } + } + + return &iterator{ + source: source, + prefix: prefix, + start: start, + end: end, + reverse: reverse, + invalid: !source.Valid(), + } +} + +// Domain returns the domain of the iterator. The caller must not modify the +// return values. +func (itr *iterator) Domain() ([]byte, []byte) { + start := itr.start + if start != nil { + start = start[len(itr.prefix):] + if len(start) == 0 { + start = nil + } + } + + end := itr.end + if end != nil { + end = end[len(itr.prefix):] + if len(end) == 0 { + end = nil + } + } + + return start, end +} + +func (itr *iterator) Valid() bool { + // once invalid, forever invalid + if itr.invalid { + return false + } + + // if source has error, consider it invalid + if err := itr.source.Err(); err != nil { + itr.invalid = true + return false + } + + // if source is invalid, consider it invalid + if !itr.source.Valid() { + itr.invalid = true + return false + } + + // if key is at the end or past it, consider it invalid + start := itr.start + end := itr.end + key := readOnlySlice(itr.source.Key()) + + if itr.reverse { + if start != nil && bytes.Compare(key, start) < 0 { + itr.invalid = true + return false + } + } else { + if end != nil && bytes.Compare(end, key) <= 0 { + itr.invalid = true + return false + } + } + + return true +} + +func (itr *iterator) Key() []byte { + itr.assertIsValid() + return copyAndFreeSlice(itr.source.Key())[len(itr.prefix):] +} + +func (itr *iterator) Value() []byte { + itr.assertIsValid() + return copyAndFreeSlice(itr.source.Value()) +} + +func (itr iterator) Next() bool { + if itr.invalid { + return false + } + + if itr.reverse { + itr.source.Prev() + } else { + itr.source.Next() + } + + return itr.Valid() +} + +func (itr *iterator) Error() error { + return itr.source.Err() +} + +func (itr *iterator) Close() { + itr.source.Close() + itr.source = nil + itr.invalid = true +} + +func (itr *iterator) assertIsValid() { + if itr.invalid { + panic("iterator is invalid") + } +} diff --git a/store/storage/rocksdb/opts.go b/store/storage/rocksdb/opts.go new file mode 100644 index 0000000000..bf2272c17c --- /dev/null +++ b/store/storage/rocksdb/opts.go @@ -0,0 +1,125 @@ +//go:build rocksdb +// +build rocksdb + +package rocksdb + +import ( + "encoding/binary" + "runtime" + + "github.com/linxGnu/grocksdb" +) + +const ( + // CFNameStateStorage defines the RocksDB column family name for versioned state + // storage. + CFNameStateStorage = "state_storage" + + // CFNameDefault defines the RocksDB column family name for the default column. + CFNameDefault = "default" +) + +// NewRocksDBOpts returns the options used for the RocksDB column family for use +// in state storage. +// +// FIXME: We do not enable dict compression for SSTFileWriter, because otherwise +// the file writer won't report correct file size. 
+// Ref: https://github.com/facebook/rocksdb/issues/11146 +func NewRocksDBOpts(sstFileWriter bool) *grocksdb.Options { + opts := grocksdb.NewDefaultOptions() + opts.SetCreateIfMissing(true) + opts.SetComparator(CreateTSComparator()) + opts.IncreaseParallelism(runtime.NumCPU()) + opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) + opts.SetTargetFileSizeMultiplier(2) + opts.SetLevelCompactionDynamicLevelBytes(true) + + // block based table options + bbto := grocksdb.NewDefaultBlockBasedTableOptions() + + // 1G block cache + bbto.SetBlockSize(32 * 1024) + bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30)) + + bbto.SetFilterPolicy(grocksdb.NewRibbonHybridFilterPolicy(9.9, 1)) + bbto.SetIndexType(grocksdb.KBinarySearchWithFirstKey) + bbto.SetOptimizeFiltersForMemory(true) + opts.SetBlockBasedTableFactory(bbto) + + // Improve sst file creation speed: compaction or sst file writer. + opts.SetCompressionOptionsParallelThreads(4) + + if !sstFileWriter { + // compression options at bottommost level + opts.SetBottommostCompression(grocksdb.ZSTDCompression) + + compressOpts := grocksdb.NewDefaultCompressionOptions() + compressOpts.MaxDictBytes = 112640 // 110k + compressOpts.Level = 12 + + opts.SetBottommostCompressionOptions(compressOpts, true) + opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(compressOpts.MaxDictBytes*100, true) + } + + return opts +} + +// OpenRocksDB opens a RocksDB database connection for versioned reading and writing. +// It also returns a column family handle for versioning using user-defined timestamps. +// The default column family is used for metadata, specifically key/value pairs +// that are stored on another column family named with "state_storage", which has +// user-defined timestamp enabled. +func OpenRocksDB(dataDir string) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { + opts := grocksdb.NewDefaultOptions() + opts.SetCreateIfMissing(true) + opts.SetCreateIfMissingColumnFamilies(true) + + db, cfHandles, err := grocksdb.OpenDbColumnFamilies( + opts, + dataDir, + []string{ + CFNameDefault, + CFNameStateStorage, + }, + []*grocksdb.Options{ + opts, + NewRocksDBOpts(false), + }, + ) + if err != nil { + return nil, nil, err + } + + return db, cfHandles[1], nil +} + +// OpenRocksDBAndTrimHistory opens a RocksDB handle similar to `OpenRocksDB`, +// but it also trims the versions newer than target one, such that it can be used +// for rollback. 
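+//
+// A rollback sketch (path and version are placeholders):
+//
+//	rawDB, cfHandle, err := OpenRocksDBAndTrimHistory("/data/ss", 1000)
+//	// on success, all versions newer than 1000 have been trimmed and the
+//	// handle can be wrapped with NewWithDB(rawDB, cfHandle)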
+func OpenRocksDBAndTrimHistory(dataDir string, version int64) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { + var ts [TimestampSize]byte + binary.LittleEndian.PutUint64(ts[:], uint64(version)) + + opts := grocksdb.NewDefaultOptions() + opts.SetCreateIfMissing(true) + opts.SetCreateIfMissingColumnFamilies(true) + + db, cfHandles, err := grocksdb.OpenDbAndTrimHistory( + opts, + dataDir, + []string{ + CFNameDefault, + CFNameStateStorage, + }, + []*grocksdb.Options{ + opts, + NewRocksDBOpts(false), + }, + ts[:], + ) + if err != nil { + return nil, nil, err + } + + return db, cfHandles[1], nil +} diff --git a/store/storage/sqlite/batch.go b/store/storage/sqlite/batch.go new file mode 100644 index 0000000000..588ad51295 --- /dev/null +++ b/store/storage/sqlite/batch.go @@ -0,0 +1,94 @@ +package sqlite + +import ( + "database/sql" + "fmt" + + "cosmossdk.io/store/v2" +) + +var _ store.Batch = (*Batch)(nil) + +type batchAction int + +const ( + batchActionSet batchAction = 0 + batchActionDel batchAction = 1 +) + +type batchOp struct { + action batchAction + storeKey string + key, value []byte +} + +type Batch struct { + tx *sql.Tx + ops []batchOp + size int + version uint64 +} + +func NewBatch(storage *sql.DB, version uint64) (*Batch, error) { + tx, err := storage.Begin() + if err != nil { + return nil, fmt.Errorf("failed to create SQL transaction: %w", err) + } + + return &Batch{ + tx: tx, + ops: make([]batchOp, 0), + version: version, + }, nil +} + +func (b *Batch) Size() int { + return b.size +} + +func (b *Batch) Reset() { + b.ops = nil + b.ops = make([]batchOp, 0) + b.size = 0 +} + +func (b *Batch) Set(storeKey string, key, value []byte) error { + b.size += len(key) + len(value) + b.ops = append(b.ops, batchOp{action: batchActionSet, storeKey: storeKey, key: key, value: value}) + return nil +} + +func (b *Batch) Delete(storeKey string, key []byte) error { + b.size += len(key) + b.ops = append(b.ops, batchOp{action: batchActionDel, storeKey: storeKey, key: key}) + return nil +} + +func (b *Batch) Write() error { + _, err := b.tx.Exec(latestVersionStmt, reservedStoreKey, keyLatestHeight, b.version, 0, b.version) + if err != nil { + return fmt.Errorf("failed to exec SQL statement: %w", err) + } + + for _, op := range b.ops { + switch op.action { + case batchActionSet: + _, err := b.tx.Exec(upsertStmt, op.storeKey, op.key, op.value, b.version, op.value) + if err != nil { + return fmt.Errorf("failed to exec SQL statement: %w", err) + } + + case batchActionDel: + _, err := b.tx.Exec(delStmt, b.version, op.storeKey, op.key, b.version) + if err != nil { + return fmt.Errorf("failed to exec SQL statement: %w", err) + } + } + } + + if err := b.tx.Commit(); err != nil { + return fmt.Errorf("failed to write SQL transaction: %w", err) + } + + return nil +} diff --git a/store/storage/sqlite/db.go b/store/storage/sqlite/db.go new file mode 100644 index 0000000000..0886b023f8 --- /dev/null +++ b/store/storage/sqlite/db.go @@ -0,0 +1,245 @@ +package sqlite + +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "path/filepath" + "strings" + + _ "modernc.org/sqlite" + + "cosmossdk.io/store/v2" +) + +const ( + driverName = "sqlite" + dbName = "ss.db" + reservedStoreKey = "_RESERVED_" + keyLatestHeight = "latest_height" + + latestVersionStmt = ` + INSERT INTO state_storage(store_key, key, value, version) + VALUES(?, ?, ?, ?) + ON CONFLICT(store_key, key, version) DO UPDATE SET + value = ?; + ` + upsertStmt = ` + INSERT INTO state_storage(store_key, key, value, version) + VALUES(?, ?, ?, ?) 
+ ON CONFLICT(store_key, key, version) DO UPDATE SET + value = ?; + ` + delStmt = ` + UPDATE state_storage SET tombstone = ? + WHERE id = ( + SELECT id FROM state_storage WHERE store_key = ? AND key = ? AND version <= ? ORDER BY version DESC LIMIT 1 + ) AND tombstone = 0; + ` +) + +var _ store.VersionedDatabase = (*Database)(nil) + +type Database struct { + storage *sql.DB +} + +func New(dataDir string) (*Database, error) { + db, err := sql.Open(driverName, filepath.Join(dataDir, dbName)) + if err != nil { + return nil, fmt.Errorf("failed to open sqlite DB: %w", err) + } + + stmt := ` + CREATE TABLE IF NOT EXISTS state_storage ( + id integer not null primary key, + store_key varchar not null, + key varchar not null, + value varchar not null, + version integer unsigned not null, + tombstone integer unsigned default 0, + unique (store_key, key, version) + ); + + CREATE UNIQUE INDEX IF NOT EXISTS idx_store_key_version ON state_storage (store_key, key, version); + ` + _, err = db.Exec(stmt) + if err != nil { + return nil, fmt.Errorf("failed to exec SQL statement: %w", err) + } + + return &Database{ + storage: db, + }, nil +} + +func (db *Database) Close() error { + err := db.storage.Close() + db.storage = nil + return err +} + +func (db *Database) GetLatestVersion() (uint64, error) { + stmt, err := db.storage.Prepare("SELECT value FROM state_storage WHERE store_key = ? AND key = ?") + if err != nil { + return 0, fmt.Errorf("failed to prepare SQL statement: %w", err) + } + + defer stmt.Close() + + var latestHeight uint64 + if err := stmt.QueryRow(reservedStoreKey, keyLatestHeight).Scan(&latestHeight); err != nil { + if errors.Is(err, sql.ErrNoRows) { + // in case of a fresh database + return 0, nil + } + + return 0, fmt.Errorf("failed to query row: %w", err) + } + + return latestHeight, nil +} + +func (db *Database) SetLatestVersion(version uint64) error { + _, err := db.storage.Exec(latestVersionStmt, reservedStoreKey, keyLatestHeight, version, 0, version) + if err != nil { + return fmt.Errorf("failed to exec SQL statement: %w", err) + } + + return nil +} + +func (db *Database) Has(storeKey string, version uint64, key []byte) (bool, error) { + val, err := db.Get(storeKey, version, key) + if err != nil { + return false, err + } + + return val != nil, nil +} + +func (db *Database) Get(storeKey string, targetVersion uint64, key []byte) ([]byte, error) { + stmt, err := db.storage.Prepare(` + SELECT value, tombstone FROM state_storage + WHERE store_key = ? AND key = ? AND version <= ? + ORDER BY version DESC LIMIT 1; + `) + if err != nil { + return nil, fmt.Errorf("failed to prepare SQL statement: %w", err) + } + + defer stmt.Close() + + var ( + value []byte + tomb uint64 + ) + if err := stmt.QueryRow(storeKey, key, targetVersion).Scan(&value, &tomb); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, nil + } + + return nil, fmt.Errorf("failed to query row: %w", err) + } + + // A tombstone of zero or a target version that is less than the tombstone + // version means the key is not deleted at the target version. 
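+	// For example, a key written at version 5 and later deleted at version 10
+	// carries tomb=10: reads at versions 5-9 return the value, while reads at
+	// version 10 or later return nil.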
+ if tomb == 0 || targetVersion < tomb { + return value, nil + } + + // the value is considered deleted + return nil, nil +} + +func (db *Database) ApplyChangeset(version uint64, cs *store.Changeset) error { + b, err := NewBatch(db.storage, version) + if err != nil { + return err + } + + for _, kvPair := range cs.Pairs { + if kvPair.Value == nil { + if err := b.Delete(kvPair.StoreKey, kvPair.Key); err != nil { + return err + } + } else { + if err := b.Set(kvPair.StoreKey, kvPair.Key, kvPair.Value); err != nil { + return err + } + } + } + + return b.Write() +} + +func (db *Database) Prune(version uint64) error { + stmt := "DELETE FROM state_storage WHERE version <= ? AND store_key != ?;" + + _, err := db.storage.Exec(stmt, version, reservedStoreKey) + if err != nil { + return fmt.Errorf("failed to exec SQL statement: %w", err) + } + + return nil +} + +func (db *Database) Iterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, store.ErrKeyEmpty + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + return nil, store.ErrStartAfterEnd + } + + return newIterator(db.storage, storeKey, version, start, end, false) +} + +func (db *Database) ReverseIterator(storeKey string, version uint64, start, end []byte) (store.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, store.ErrKeyEmpty + } + + if start != nil && end != nil && bytes.Compare(start, end) > 0 { + return nil, store.ErrStartAfterEnd + } + + return newIterator(db.storage, storeKey, version, start, end, true) +} + +func (db *Database) PrintRowsDebug() { + stmt, err := db.storage.Prepare("SELECT store_key, key, value, version, tombstone FROM state_storage") + if err != nil { + panic(fmt.Errorf("failed to prepare SQL statement: %w", err)) + } + + defer stmt.Close() + + rows, err := stmt.Query() + if err != nil { + panic(fmt.Errorf("failed to execute SQL query: %w", err)) + } + + var sb strings.Builder + for rows.Next() { + var ( + storeKey string + key []byte + value []byte + version uint64 + tomb uint64 + ) + if err := rows.Scan(&storeKey, &key, &value, &version, &tomb); err != nil { + panic(fmt.Sprintf("failed to scan row: %s", err)) + } + + sb.WriteString(fmt.Sprintf("STORE_KEY: %s, KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", storeKey, key, value, version, tomb)) + } + if err := rows.Err(); err != nil { + panic(fmt.Errorf("received unexpected error: %w", err)) + } + + fmt.Println(strings.TrimSpace(sb.String())) +} diff --git a/store/storage/sqlite/db_test.go b/store/storage/sqlite/db_test.go new file mode 100644 index 0000000000..60d258626a --- /dev/null +++ b/store/storage/sqlite/db_test.go @@ -0,0 +1,89 @@ +package sqlite + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/storage" +) + +const ( + storeKey1 = "store1" +) + +func TestStorageTestSuite(t *testing.T) { + s := &storage.StorageTestSuite{ + NewDB: func(dir string) (store.VersionedDatabase, error) { + return New(dir) + }, + EmptyBatchSize: 0, + } + suite.Run(t, s) +} + +func TestDatabase_ReverseIterator(t *testing.T) { + db, err := New(t.TempDir()) + require.NoError(t, err) + defer db.Close() + + cs := new(store.Changeset) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, 
..., val099 + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + require.NoError(t, db.ApplyChangeset(1, cs)) + + // reverse iterator without an end key + iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) + require.NoError(t, err) + + defer iter.Close() + + i, count := 99, 0 + for ; iter.Valid(); iter.Next() { + require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) + require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) + + i-- + count++ + } + require.Equal(t, 100, count) + require.NoError(t, iter.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + require.False(t, iter.Next()) + require.False(t, iter.Valid()) + + // reverse iterator with with a start and end domain + iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) + require.NoError(t, err) + + defer iter2.Close() + + i, count = 18, 0 + for ; iter2.Valid(); iter2.Next() { + require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) + require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) + + i-- + count++ + } + require.Equal(t, 9, count) + require.NoError(t, iter2.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + require.False(t, iter2.Next()) + require.False(t, iter2.Valid()) + + // start must be <= end + iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) + require.Error(t, err) + require.Nil(t, iter3) +} diff --git a/store/storage/sqlite/iterator.go b/store/storage/sqlite/iterator.go new file mode 100644 index 0000000000..8e31200e36 --- /dev/null +++ b/store/storage/sqlite/iterator.go @@ -0,0 +1,173 @@ +package sqlite + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "golang.org/x/exp/slices" + _ "modernc.org/sqlite" + + "cosmossdk.io/store/v2" +) + +var _ store.Iterator = (*iterator)(nil) + +type iterator struct { + statement *sql.Stmt + rows *sql.Rows + key, val []byte + start, end []byte + valid bool + err error +} + +func newIterator(storage *sql.DB, storeKey string, targetVersion uint64, start, end []byte, reverse bool) (*iterator, error) { + var ( + keyClause = []string{"store_key = ?", "version <= ?"} + queryArgs []any + ) + + switch { + case len(start) > 0 && len(end) > 0: + keyClause = append(keyClause, "key >= ?", "key < ?") + queryArgs = []any{storeKey, targetVersion, start, end, targetVersion} + + case len(start) > 0 && len(end) == 0: + keyClause = append(keyClause, "key >= ?") + queryArgs = []any{storeKey, targetVersion, start, targetVersion} + + case len(start) == 0 && len(end) > 0: + keyClause = append(keyClause, "key < ?") + queryArgs = []any{storeKey, targetVersion, end, targetVersion} + + default: + queryArgs = []any{storeKey, targetVersion, targetVersion} + } + + orderBy := "ASC" + if reverse { + orderBy = "DESC" + } + + // Note, this is not susceptible to SQL injection because placeholders are used + // for parts of the query outside the store's direct control. + stmt, err := storage.Prepare(fmt.Sprintf(` + SELECT x.key, x.value + FROM ( + SELECT key, value, version, tombstone, + row_number() OVER (PARTITION BY key ORDER BY version DESC) AS _rn + FROM state_storage WHERE %s + ) x + WHERE x._rn = 1 AND (x.tombstone = 0 OR x.tombstone > ?) ORDER BY x.key %s; + `, strings.Join(keyClause, " AND "), orderBy)) + if err != nil { + return nil, fmt.Errorf("failed to prepare SQL statement: %w", err) + } + + rows, err := stmt.Query(queryArgs...) 
+ if err != nil { + _ = stmt.Close() + return nil, fmt.Errorf("failed to execute SQL query: %w", err) + } + + itr := &iterator{ + statement: stmt, + rows: rows, + start: start, + end: end, + valid: rows.Next(), + } + if !itr.valid { + itr.err = fmt.Errorf("iterator invalid: %w", sql.ErrNoRows) + return itr, nil + } + + // read the first row + itr.parseRow() + if !itr.valid { + return itr, nil + } + + return itr, nil +} + +func (itr *iterator) Close() { + _ = itr.statement.Close() + itr.valid = false + itr.statement = nil + itr.rows = nil +} + +// Domain returns the domain of the iterator. The caller must not modify the +// return values. +func (itr *iterator) Domain() ([]byte, []byte) { + return itr.start, itr.end +} + +func (itr *iterator) Key() []byte { + itr.assertIsValid() + return slices.Clone(itr.key) +} + +func (itr *iterator) Value() []byte { + itr.assertIsValid() + return slices.Clone(itr.val) +} + +func (itr *iterator) Valid() bool { + if !itr.valid || itr.rows.Err() != nil { + itr.valid = false + return itr.valid + } + + // if key is at the end or past it, consider it invalid + if end := itr.end; end != nil { + if bytes.Compare(end, itr.Key()) <= 0 { + itr.valid = false + return itr.valid + } + } + + return true +} + +func (itr *iterator) Next() bool { + if itr.rows.Next() { + itr.parseRow() + return itr.Valid() + } + + itr.valid = false + return itr.valid +} + +func (itr *iterator) Error() error { + if err := itr.rows.Err(); err != nil { + return err + } + + return itr.err +} + +func (itr *iterator) parseRow() { + var ( + key []byte + value []byte + ) + if err := itr.rows.Scan(&key, &value); err != nil { + itr.err = fmt.Errorf("failed to scan row: %s", err) + itr.valid = false + return + } + + itr.key = key + itr.val = value +} + +func (itr *iterator) assertIsValid() { + if !itr.valid { + panic("iterator is invalid") + } +} diff --git a/store/storage/storage_bench_test.go b/store/storage/storage_bench_test.go new file mode 100644 index 0000000000..59c4d1994a --- /dev/null +++ b/store/storage/storage_bench_test.go @@ -0,0 +1,173 @@ +//go:build rocksdb +// +build rocksdb + +package storage + +import ( + "bytes" + "fmt" + "math/rand" + "sort" + "testing" + + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/storage/pebbledb" + "cosmossdk.io/store/v2/storage/rocksdb" + "cosmossdk.io/store/v2/storage/sqlite" + "github.com/stretchr/testify/require" +) + +var ( + backends = map[string]func(dataDir string) (store.VersionedDatabase, error){ + "rocksdb_versiondb_opts": func(dataDir string) (store.VersionedDatabase, error) { + return rocksdb.New(dataDir) + }, + "pebbledb_default_opts": func(dataDir string) (store.VersionedDatabase, error) { + return pebbledb.New(dataDir) + }, + "btree_sqlite": func(dataDir string) (store.VersionedDatabase, error) { + return sqlite.New(dataDir) + }, + } + rng = rand.New(rand.NewSource(567320)) +) + +func BenchmarkGet(b *testing.B) { + numKeyVals := 1_000_000 + keys := make([][]byte, numKeyVals) + vals := make([][]byte, numKeyVals) + for i := 0; i < numKeyVals; i++ { + key := make([]byte, 128) + val := make([]byte, 128) + + _, err := rng.Read(key) + require.NoError(b, err) + _, err = rng.Read(val) + require.NoError(b, err) + + keys[i] = key + vals[i] = val + } + + for ty, fn := range backends { + db, err := fn(b.TempDir()) + require.NoError(b, err) + defer func() { + _ = db.Close() + }() + + cs := new(store.Changeset) + for i := 0; i < numKeyVals; i++ { + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: keys[i], Value: vals[i]}) + } + + require.NoError(b, 
db.ApplyChangeset(1, cs)) + + b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + key := keys[rng.Intn(len(keys))] + + b.StartTimer() + _, err = db.Get(storeKey1, 1, key) + require.NoError(b, err) + } + }) + } +} + +func BenchmarkApplyChangeset(b *testing.B) { + for ty, fn := range backends { + db, err := fn(b.TempDir()) + require.NoError(b, err) + defer func() { + _ = db.Close() + }() + + b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + + cs := new(store.Changeset) + for j := 0; j < 1000; j++ { + key := make([]byte, 128) + val := make([]byte, 128) + + _, err = rng.Read(key) + require.NoError(b, err) + _, err = rng.Read(val) + require.NoError(b, err) + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: key, Value: val}) + } + + b.StartTimer() + require.NoError(b, db.ApplyChangeset(uint64(b.N+1), cs)) + } + }) + } +} + +func BenchmarkIterate(b *testing.B) { + numKeyVals := 1_000_000 + keys := make([][]byte, numKeyVals) + vals := make([][]byte, numKeyVals) + for i := 0; i < numKeyVals; i++ { + key := make([]byte, 128) + val := make([]byte, 128) + + _, err := rng.Read(key) + require.NoError(b, err) + _, err = rng.Read(val) + require.NoError(b, err) + + keys[i] = key + vals[i] = val + + } + + for ty, fn := range backends { + db, err := fn(b.TempDir()) + require.NoError(b, err) + defer func() { + _ = db.Close() + }() + + b.StopTimer() + + cs := new(store.Changeset) + for i := 0; i < numKeyVals; i++ { + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: keys[i], Value: vals[i]}) + } + + require.NoError(b, db.ApplyChangeset(1, cs)) + + sort.Slice(keys, func(i, j int) bool { + return bytes.Compare(keys[i], keys[j]) < 0 + }) + + b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { + b.ResetTimer() + + for i := 0; i < b.N; i++ { + b.StopTimer() + + itr, err := db.Iterator(storeKey1, 1, keys[0], nil) + require.NoError(b, err) + + b.StartTimer() + + for ; itr.Valid(); itr.Next() { + _ = itr.Key() + _ = itr.Value() + } + + require.NoError(b, itr.Error()) + } + }) + } +} diff --git a/store/storage/storage_test_suite.go b/store/storage/storage_test_suite.go new file mode 100644 index 0000000000..72fac87387 --- /dev/null +++ b/store/storage/storage_test_suite.go @@ -0,0 +1,485 @@ +package storage + +import ( + "fmt" + "slices" + + "github.com/stretchr/testify/suite" + + "cosmossdk.io/store/v2" +) + +const ( + storeKey1 = "store1" +) + +// StorageTestSuite defines a reusable test suite for all storage backends. 
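+//
+// NewDB constructs the backend under test, EmptyBatchSize is the batch size the
+// backend reports for an empty changeset, and SkipTests lists qualified suite
+// test names to skip (e.g. the pruning test for a backend that does not
+// support pruning).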
+type StorageTestSuite struct { + suite.Suite + + NewDB func(dir string) (store.VersionedDatabase, error) + EmptyBatchSize int + SkipTests []string +} + +func (s *StorageTestSuite) TestDatabase_Close() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + s.Require().NoError(db.Close()) + + // close should not be idempotent + s.Require().Panics(func() { _ = db.Close() }) +} + +func (s *StorageTestSuite) TestDatabase_LatestVersion() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + lv, err := db.GetLatestVersion() + s.Require().NoError(err) + s.Require().Zero(lv) + + for i := uint64(1); i <= 1001; i++ { + err = db.SetLatestVersion(i) + s.Require().NoError(err) + + lv, err = db.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(i, lv) + } +} + +func (s *StorageTestSuite) TestDatabase_VersionedKeys() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + for i := uint64(1); i <= 100; i++ { + s.Require().NoError(db.ApplyChangeset(i, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key"), Value: []byte(fmt.Sprintf("value%03d", i))}, + ))) + } + + for i := uint64(1); i <= 100; i++ { + bz, err := db.Get(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().Equal(fmt.Sprintf("value%03d", i), string(bz)) + } +} + +func (s *StorageTestSuite) TestDatabase_GetVersionedKey() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + // store a key at version 1 + s.Require().NoError(db.ApplyChangeset(1, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key"), Value: []byte("value001")}, + ))) + + // assume chain progresses to version 10 w/o any changes to key + bz, err := db.Get(storeKey1, 10, []byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("value001"), bz) + + ok, err := db.Has(storeKey1, 10, []byte("key")) + s.Require().NoError(err) + s.Require().True(ok) + + // chain progresses to version 11 with an update to key + s.Require().NoError(db.ApplyChangeset(11, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key"), Value: []byte("value011")}, + ))) + + bz, err = db.Get(storeKey1, 10, []byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("value001"), bz) + + ok, err = db.Has(storeKey1, 10, []byte("key")) + s.Require().NoError(err) + s.Require().True(ok) + + for i := uint64(11); i <= 14; i++ { + bz, err = db.Get(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().Equal([]byte("value011"), bz) + + ok, err = db.Has(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().True(ok) + } + + // chain progresses to version 15 with a delete to key + s.Require().NoError(db.ApplyChangeset(15, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key")}, + ))) + + // all queries up to version 14 should return the latest value + for i := uint64(1); i <= 14; i++ { + bz, err = db.Get(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().NotNil(bz) + + ok, err = db.Has(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().True(ok) + } + + // all queries after version 15 should return nil + for i := uint64(15); i <= 17; i++ { + bz, err = db.Get(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().Nil(bz) + + ok, err = db.Has(storeKey1, i, []byte("key")) + s.Require().NoError(err) + s.Require().False(ok) + } +} + +func (s *StorageTestSuite) TestDatabase_ApplyChangeset() { + db, err := 
s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + cs := new(store.Changeset) + for i := 0; i < 100; i++ { + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")}) + } + + for i := 0; i < 100; i++ { + if i%10 == 0 { + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(fmt.Sprintf("key%03d", i))}) + } + } + + s.Require().NoError(db.ApplyChangeset(1, cs)) + + lv, err := db.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(1), lv) + + for i := 0; i < 100; i++ { + ok, err := db.Has(storeKey1, 1, []byte(fmt.Sprintf("key%03d", i))) + s.Require().NoError(err) + + if i%10 == 0 { + s.Require().False(ok) + } else { + s.Require().True(ok) + } + } +} + +func (s *StorageTestSuite) TestDatabase_IteratorEmptyDomain() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + iter, err := db.Iterator(storeKey1, 1, []byte{}, []byte{}) + s.Require().Error(err) + s.Require().Nil(iter) +} + +func (s *StorageTestSuite) TestDatabase_IteratorClose() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + iter, err := db.Iterator(storeKey1, 1, []byte("key000"), nil) + s.Require().NoError(err) + iter.Close() + + s.Require().False(iter.Valid()) + s.Require().Panics(func() { iter.Close() }) +} + +func (s *StorageTestSuite) TestDatabase_IteratorDomain() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + testCases := map[string]struct { + start, end []byte + }{ + "start without end domain": { + start: []byte("key010"), + }, + "start and end domain": { + start: []byte("key010"), + end: []byte("key020"), + }, + } + + for name, tc := range testCases { + s.Run(name, func() { + iter, err := db.Iterator(storeKey1, 1, tc.start, tc.end) + s.Require().NoError(err) + + defer iter.Close() + + start, end := iter.Domain() + s.Require().Equal(tc.start, start) + s.Require().Equal(tc.end, end) + }) + } +} + +func (s *StorageTestSuite) TestDatabase_Iterator() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + cs := new(store.Changeset) + for i := 0; i < 100; i++ { + key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 + val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + s.Require().NoError(db.ApplyChangeset(1, cs)) + + // iterator without an end key over multiple versions + for v := uint64(1); v < 5; v++ { + itr, err := db.Iterator(storeKey1, v, []byte("key000"), nil) + s.Require().NoError(err) + + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(100, count) + s.Require().NoError(itr.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr.Next()) + s.Require().False(itr.Valid()) + } + + // iterator with a start and end domain over multiple versions + for v := uint64(1); v < 5; v++ { + itr2, err := db.Iterator(storeKey1, v, []byte("key010"), []byte("key019")) + s.Require().NoError(err) + + defer itr2.Close() + + i, count := 10, 0 + for ; itr2.Valid(); itr2.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr2.Key()) + s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)),
itr2.Value()) + + i++ + count++ + } + s.Require().Equal(9, count) + s.Require().NoError(itr2.Error()) + + // seek past domain, which should make the iterator invalid and produce an error + s.Require().False(itr2.Next()) + s.Require().False(itr2.Valid()) + } + + // start must be <= end + iter3, err := db.Iterator(storeKey1, 1, []byte("key020"), []byte("key019")) + s.Require().Error(err) + s.Require().Nil(iter3) +} + +func (s *StorageTestSuite) TestDatabase_Iterator_RangedDeletes() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + s.Require().NoError(db.ApplyChangeset(1, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key001"), Value: []byte("value001")}, + store.KVPair{StoreKey: storeKey1, Key: []byte("key002"), Value: []byte("value001")}, + ))) + + s.Require().NoError(db.ApplyChangeset(5, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key002"), Value: []byte("value002")}, + ))) + + s.Require().NoError(db.ApplyChangeset(10, store.NewChangeset( + store.KVPair{StoreKey: storeKey1, Key: []byte("key002")}, + ))) + + itr, err := db.Iterator(storeKey1, 11, []byte("key001"), nil) + s.Require().NoError(err) + + defer itr.Close() + + // there should only be one valid key in the iterator -- key001 + var count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte("key001"), itr.Key()) + count++ + } + s.Require().Equal(1, count) + s.Require().NoError(itr.Error()) +} + +func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + // for versions 1-49, set all 10 keys + for v := uint64(1); v < 50; v++ { + cs := new(store.Changeset) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) + val := fmt.Sprintf("val%03d-%03d", i, v) + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + s.Require().NoError(db.ApplyChangeset(v, cs)) + } + + // for versions 50-100, only update even keys + for v := uint64(50); v <= 100; v++ { + cs := new(store.Changeset) + for i := 0; i < 10; i++ { + if i%2 == 0 { + key := fmt.Sprintf("key%03d", i) + val := fmt.Sprintf("val%03d-%03d", i, v) + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + } + + s.Require().NoError(db.ApplyChangeset(v, cs)) + } + + itr, err := db.Iterator(storeKey1, 69, []byte("key000"), nil) + s.Require().NoError(err) + + defer itr.Close() + + // All keys should be present; All odd keys should have a value that reflects + // version 49, and all even keys should have a value that reflects the desired + // version, 69. 
+ var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + + if i%2 == 0 { + s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 69)), itr.Value()) + } else { + s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 49)), itr.Value()) + } + + i = (i + 1) % 10 + count++ + } + s.Require().Equal(10, count) + s.Require().NoError(itr.Error()) +} + +func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() { + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + // for versions 1-50, set all 10 keys + for v := uint64(1); v <= 50; v++ { + cs := new(store.Changeset) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) + val := fmt.Sprintf("val%03d-%03d", i, v) + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + s.Require().NoError(db.ApplyChangeset(v, cs)) + } + + // create an iterator over the entire domain + itr, err := db.Iterator(storeKey1, 50, nil, nil) + s.Require().NoError(err) + + defer itr.Close() + + var i, count int + for ; itr.Valid(); itr.Next() { + s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) + s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 50)), itr.Value()) + + i++ + count++ + } + s.Require().Equal(10, count) + s.Require().NoError(itr.Error()) +} + +func (s *StorageTestSuite) TestDatabase_Prune() { + if slices.Contains(s.SkipTests, s.T().Name()) { + s.T().SkipNow() + } + + db, err := s.NewDB(s.T().TempDir()) + s.Require().NoError(err) + defer db.Close() + + // for versions 1-50, set 10 keys + for v := uint64(1); v <= 50; v++ { + cs := new(store.Changeset) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) + val := fmt.Sprintf("val%03d-%03d", i, v) + + cs.AddKVPair(store.KVPair{StoreKey: storeKey1, Key: []byte(key), Value: []byte(val)}) + } + + s.Require().NoError(db.ApplyChangeset(v, cs)) + } + + // prune the first 25 versions + s.Require().NoError(db.Prune(25)) + + latestVersion, err := db.GetLatestVersion() + s.Require().NoError(err) + s.Require().Equal(uint64(50), latestVersion) + + // Ensure all keys are no longer present up to and including version 25 and + // all keys are present after version 25. + for v := uint64(1); v <= 50; v++ { + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) + val := fmt.Sprintf("val%03d-%03d", i, v) + + bz, err := db.Get(storeKey1, v, []byte(key)) + s.Require().NoError(err) + if v <= 25 { + s.Require().Nil(bz) + } else { + s.Require().Equal([]byte(val), bz) + } + } + } + + itr, err := db.Iterator(storeKey1, 25, []byte("key000"), nil) + s.Require().NoError(err) + s.Require().False(itr.Valid()) + + // prune the latest version which should prune the entire dataset + s.Require().NoError(db.Prune(50)) + + for v := uint64(1); v <= 50; v++ { + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key%03d", i) + + bz, err := db.Get(storeKey1, v, []byte(key)) + s.Require().NoError(err) + s.Require().Nil(bz) + } + } +} diff --git a/store/storage/util/iterator.go b/store/storage/util/iterator.go new file mode 100644 index 0000000000..fe207314c7 --- /dev/null +++ b/store/storage/util/iterator.go @@ -0,0 +1,53 @@ +package util + +// IterateWithPrefix returns the begin and end keys for an iterator over a domain +// and prefix. 
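+//
+// Illustrative example (values chosen here, not taken from any caller): given
+// prefix "s/" with nil begin and end, the returned domain is ["s/", "s0"),
+// since the nil end is replaced by CopyIncr(prefix), an exclusive upper bound
+// strictly greater than every key carrying the prefix.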
+func IterateWithPrefix(prefix, begin, end []byte) ([]byte, []byte) { + if len(prefix) == 0 { + return begin, end + } + + begin = cloneAppend(prefix, begin) + + if end == nil { + end = CopyIncr(prefix) + } else { + end = cloneAppend(prefix, end) + } + + return begin, end +} + +func cloneAppend(front, tail []byte) (res []byte) { + res = make([]byte, len(front)+len(tail)) + + n := copy(res, front) + copy(res[n:], tail) + + return res +} + +func CopyIncr(bz []byte) []byte { + if len(bz) == 0 { + panic("copyIncr expects non-zero bz length") + } + + ret := make([]byte, len(bz)) + copy(ret, bz) + + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i]++ + return ret + } + + ret[i] = byte(0x00) + + if i == 0 { + // overflow + return nil + } + } + + return nil +} diff --git a/store/store.go b/store/store.go index e674575463..3f23fc6734 100644 --- a/store/store.go +++ b/store/store.go @@ -1,19 +1,123 @@ package store import ( - dbm "github.com/cosmos/cosmos-db" + "io" - "cosmossdk.io/log" - "cosmossdk.io/store/cache" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/rootmulti" - "cosmossdk.io/store/types" + ics23 "github.com/cosmos/ics23/go" ) -func NewCommitMultiStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) types.CommitMultiStore { - return rootmulti.NewStore(db, logger, metricGatherer) +// TODO: Move relevant types to the 'core' package. + +// StoreType defines a type of KVStore. +type StoreType int + +// Sentinel store types. +const ( + StoreTypeBranch StoreType = iota + StoreTypeTrace + StoreTypeMem +) + +// RootStore defines an abstraction layer containing a State Storage (SS) engine +// and one or more State Commitment (SC) engines. +type RootStore interface { + GetSCStore(storeKey string) Tree + MountSCStore(storeKey string, sc Tree) error + GetKVStore(storeKey string) KVStore + GetBranchedKVStore(storeKey string) BranchedKVStore + + GetProof(storeKey string, version uint64, key []byte) (*ics23.CommitmentProof, error) + + Branch() BranchedRootStore + + SetTracingContext(tc TraceContext) + SetTracer(w io.Writer) + TracingEnabled() bool + + LoadVersion(version uint64) error + LoadLatestVersion() error + GetLatestVersion() (uint64, error) + + WorkingHash() ([]byte, error) + SetCommitHeader(h CommitHeader) + Commit() ([]byte, error) + + // TODO: + // + // - Queries + // + // Ref: https://github.com/cosmos/cosmos-sdk/issues/17314 + + io.Closer } -func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache { - return cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) +// BranchedRootStore defines an extension of the RootStore interface that allows +// for nested branching and flushing of writes. It extends RootStore by allowing +// a caller to call Branch() which should return a BranchedRootStore that has all +// internal relevant KV stores branched. A caller can then call Write() on the +// BranchedRootStore which will flush all changesets to the parent RootStore's +// internal KV stores. +type BranchedRootStore interface { + RootStore + + Write() +} + +// KVStore defines the core storage primitive for modules to read and write state. +type KVStore interface { + GetStoreKey() string + + // GetStoreType returns the concrete store type. + GetStoreType() StoreType + + // Get returns a value for a given key from the store. + Get(key []byte) []byte + + // Has checks if a key exists. + Has(key []byte) bool + + // Set sets a key/value entry to the store. + Set(key, value []byte) + + // Delete deletes the key from the store. 
+ Delete(key []byte) + + // GetChangeset returns the ChangeSet, if any, for the branched state. This + // should contain all writes that are marked to be flushed and committed during + // Commit(). + GetChangeset() *Changeset + + // Reset resets the store, which is implementation dependent. + Reset() error + + // Iterator creates a new Iterator over the domain [start, end). Note: + // + // - Start must be less than end + // - The iterator must be closed by caller + // - To iterate over entire domain, use store.Iterator(nil, nil) + // + // CONTRACT: No writes may happen within a domain while an iterator exists over + // it, with the exception of a branched/cached KVStore. + Iterator(start, end []byte) Iterator + + // ReverseIterator creates a new reverse Iterator over the domain [start, end). + // It has the same properties and contracts as Iterator. + ReverseIterator(start, end []byte) Iterator +} + +// BranchedKVStore defines an interface for a branched KVStore. It extends KVStore +// by allowing dirty entries to be flushed to the underlying KVStore or discarded +// altogether. A BranchedKVStore can itself be branched, allowing for nested branching +// where writes are flushed up the branched stack. +type BranchedKVStore interface { + KVStore + + // Write flushes writes to the underlying store. + Write() + + // Branch recursively wraps. + Branch() BranchedKVStore + + // BranchWithTrace recursively wraps with tracing enabled. + BranchWithTrace(w io.Writer, tc TraceContext) BranchedKVStore } diff --git a/store/streaming/README.md b/store/streaming/README.md deleted file mode 100644 index faa304dec0..0000000000 --- a/store/streaming/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Cosmos-SDK Plugins - -This package contains an extensible plugin system for the Cosmos-SDK. The plugin system leverages the [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) system. This system is designed to work over RPC. - -Although the `go-plugin` is built to work over RPC, it is currently only designed to work over a local network. - -## Pre requisites - -For an overview of supported features by the `go-plugin` system, please see https://github.com/hashicorp/go-plugin. The `go-plugin` documentation is located [here](https://github.com/hashicorp/go-plugin/tree/master/docs). You can also directly visit any of the links below: - -* [Writing plugins without Go](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md) -* [Go Plugin Tutorial](https://github.com/hashicorp/go-plugin/blob/master/docs/extensive-go-plugin-tutorial.md) -* [Plugin Internals](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md) -* [Plugin Architecture](https://www.youtube.com/watch?v=SRvm3zQQc1Q) (start here) - -## Exposing plugins - -To expose plugins to the plugin system, you will need to: - -1. Implement the gRPC message protocol service of the plugin -2. Build the plugin binary -3. Export it - -Read the plugin documentation in the [Streaming Plugins](#streaming-plugins) section for examples on how to build a plugin.
- -## Streaming Plugins - -List of support streaming plugins - -* [ABCI State Streaming Plugin](abci/README.md) diff --git a/store/streaming/abci/README.md b/store/streaming/abci/README.md deleted file mode 100644 index 08aaf12e8a..0000000000 --- a/store/streaming/abci/README.md +++ /dev/null @@ -1,210 +0,0 @@ -# ABCI and State Streaming Plugin (gRPC) - -The `BaseApp` package contains the interface for a [ABCIListener](https://github.com/cosmos/cosmos-sdk/blob/main/baseapp/streaming.go) -service used to write state changes out from individual KVStores to external systems, -as described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-038-state-listening.md). - -Specific `ABCIListener` service implementations are written and loaded as [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin). - -## Implementation - -In this section we describe the implementation of the `ABCIListener` interface as a gRPC service. - -### Service Protocol - -The companion service protocol for the `ABCIListener` interface is described below. -See [proto/cosmos/store/streaming/abci/grpc.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/store/streaming/abci/grpc.proto) for full details. - -```protobuf reference -https://github.com/cosmos/cosmos-sdk/blob/6cee22df52eb0cbb30e351fbb41f66d26c1f8300/proto/cosmos/store/streaming/abci/grpc.proto#L1-L36 -``` - -### Generating the Code - -To generate the stubs the local client implementation can call, run the following command: - -```shell -make proto-gen -``` - -For other languages you'll need to [download](https://github.com/cosmos/cosmos-sdk/blob/main/third_party/proto/README.md) -the CosmosSDK protos into your project and compile. For language specific compilation instructions visit -[https://github.com/grpc](https://github.com/grpc) and look in the `examples` folder of your -language of choice `https://github.com/grpc/grpc-{language}/tree/master/examples` and [https://grpc.io](https://grpc.io) -for the documentation. - -### gRPC Client and Server - -Implementing the ABCIListener gRPC client and server is a simple and straight forward process. - -To create the client and server we create a `ListenerGRPCPlugin` struct that implements the -`plugin.GRPCPlugin` interface and a `Impl` property that will contain a concrete implementation -of the `ABCIListener` plugin written in Go. - -#### The Interface - -The `BaseApp` `ABCIListener` interface will be what will define the plugins capabilities. - -Boilerplate RPC implementation example of the `ABCIListener` interface. ([store/streaming/abci/grpc.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/abci/grpc.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/grpc.go#L13-L79 -``` - -Our `ABCIlistener` service plugin. ([store/streaming/plugins/abci/v1/interface.go](interface.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/interface.go#L13-L45 -``` - -#### Plugin Implementation - -Plugin implementations can be in a completely separate package but will need access -to the `ABCIListener` interface. One thing to note here is that plugin implementations -defined in the `ListenerGRPCPlugin.Impl` property are **only** required when building -plugins in Go. They are pre-compiled into Go modules. The `GRPCServer.Impl` calls methods -on this out-of-process plugin. 
- -For Go plugins this is all that is required to process data that is sent over gRPC. -This provides the advantage of writing quick plugins that process data to different -external systems (i.e: DB, File, DB, Kafka, etc.) without the need for implementing -the gRPC server endpoints. - -```go -// MyPlugin is the implementation of the ABCIListener interface -// For Go plugins this is all that is required to process data sent over gRPC. -type MyPlugin struct { - ... -} - -func (a FilePlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { - // process data - return nil -} - -func (a FilePlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error { - // process data - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: v1.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &ABCIListenerGRPCPlugin{Impl: &MyPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} -``` - -## Plugin Loading System - -A general purpose plugin loading system has been provided by the SDK to be able to load not just -the `ABCIListener` service plugin but other protocol services as well. You can take a look -at how plugins are loaded by the SDK in [store/streaming/streaming.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/streaming.go) - -You'll need to add this in your `app.go` - -```go -// app.go - -func NewApp(...) *App { - - ... - - // register streaming services - streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey)) - for service := range streamingCfg { - pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingABCIPluginTomlKey) - pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey))) - if len(pluginName) > 0 { - logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel)) - plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel) - if err != nil { - tmos.Exit(err.Error()) - } - if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil { - tmos.Exit(err.Error()) - } - } - } - - ... -} -``` - -## Configuration - -Update the streaming section in `app.toml` - -```toml -# Streaming allows nodes to stream state to external systems -[streaming] - -# streaming.abci specifies the configuration for the ABCI Listener streaming service -[streaming.abci] - -# List of kv store keys to stream out via gRPC -# Set to ["*"] to expose all keys. -keys = ["*"] - -# The plugin name used for streaming via gRPC -# Supported plugins: abci -plugin = "abci" - -# stop-node-on-err specifies whether to stop the node when the -stop-node-on-err = true -``` - -## Updating the protocol - -If you update the protocol buffers file, you can regenerate the file and plugins using the -following commands from the project root directory. You do not need to run this if you're -just trying the examples, you can skip ahead to the [Testing](#testing) section. - -```shell -make proto-gen -``` - -* stdout plugin; from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/stdout/stdout streaming/abci/examples/stdout/stdout.go -``` - -* file plugin (writes to `~/`); from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go -``` - -### Testing - -Export a plugin from one of the Go or Python examples. 
- -* stdout plugin - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/stdout/stdout" -``` - -* file plugin (writes to ~/) - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/file/file" -``` - -where `{path to}` is the parent path to the `cosmos-sdk` repo on you system. - -Test: - -```shell -make test-sim-nondeterminism-streaming -``` - -The plugin system will look for the plugin binary in the `env` variable `COSMOS_SDK_{PLUGIN_NAME}` above -and if it does not find it, it will error out. The plugin UPPERCASE name is that of the -`streaming.abci.plugin` TOML configuration setting. diff --git a/store/streaming/abci/examples/file/.gitignore b/store/streaming/abci/examples/file/.gitignore deleted file mode 100644 index bc8ff79063..0000000000 --- a/store/streaming/abci/examples/file/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# ignore the file plugin binary -file \ No newline at end of file diff --git a/store/streaming/abci/examples/file/README.md b/store/streaming/abci/examples/file/README.md deleted file mode 100644 index 27e5f8956e..0000000000 --- a/store/streaming/abci/examples/file/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# File Plugin - -The file plugin is an example plugin written in Go. It is intended for local testing and should not be used in production environments. - -## Build - -To build the plugin run the following command: - -```shell -cd store -``` - -```shell -go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go -``` - -* The plugin will write files to the users home directory `~/`. diff --git a/store/streaming/abci/examples/file/file.go b/store/streaming/abci/examples/file/file.go deleted file mode 100644 index 150b8cafce..0000000000 --- a/store/streaming/abci/examples/file/file.go +++ /dev/null @@ -1,81 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "path/filepath" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/hashicorp/go-plugin" - - streamingabci "cosmossdk.io/store/streaming/abci" - store "cosmossdk.io/store/types" -) - -// FilePlugin is the implementation of the baseapp.ABCIListener interface -// For Go plugins this is all that is required to process data sent over gRPC. 
-type FilePlugin struct { - BlockHeight int64 -} - -func (a *FilePlugin) writeToFile(file string, data []byte) error { - home, err := os.UserHomeDir() - if err != nil { - return err - } - - filename := fmt.Sprintf("%s/%s.txt", home, file) - f, err := os.OpenFile(filepath.Clean(filename), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600) - if err != nil { - return err - } - - if _, err := f.Write(data); err != nil { - f.Close() // ignore error; Write error takes precedence - return err - } - - if err := f.Close(); err != nil { - return err - } - - return nil -} - -func (a *FilePlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error { - d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req)) - d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req)) - if err := a.writeToFile("finalize-block-req", d1); err != nil { - return err - } - if err := a.writeToFile("finalize-block-res", d2); err != nil { - return err - } - return nil -} - -func (a *FilePlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error { - fmt.Printf("listen-commit: block_height=%d data=%v", res.RetainHeight, changeSet) - d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res)) - d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, changeSet)) - if err := a.writeToFile("commit-res", d1); err != nil { - return err - } - if err := a.writeToFile("state-change", d2); err != nil { - return err - } - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: streamingabci.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &streamingabci.ListenerGRPCPlugin{Impl: &FilePlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/store/streaming/abci/examples/stdout/stdout b/store/streaming/abci/examples/stdout/stdout deleted file mode 100755 index 93f61a7b93..0000000000 Binary files a/store/streaming/abci/examples/stdout/stdout and /dev/null differ diff --git a/store/streaming/abci/examples/stdout/stdout.go b/store/streaming/abci/examples/stdout/stdout.go deleted file mode 100644 index f1327a5862..0000000000 --- a/store/streaming/abci/examples/stdout/stdout.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "context" - "fmt" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/hashicorp/go-plugin" - - streamingabci "cosmossdk.io/store/streaming/abci" - store "cosmossdk.io/store/types" -) - -// StdoutPlugin is the implementation of the ABCIListener interface -// For Go plugins this is all that is required to process data sent over gRPC. 
-type StdoutPlugin struct { - BlockHeight int64 -} - -func (a *StdoutPlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error { - a.BlockHeight = req.Height - // process tx messages (i.e: sent to external system) - fmt.Printf("listen-finalize-block: block-height=%d req=%v res=%v", a.BlockHeight, req, res) - return nil -} - -func (a *StdoutPlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error { - // process block commit messages (i.e: sent to external system) - fmt.Printf("listen-commit: block_height=%d res=%v data=%v", a.BlockHeight, res, changeSet) - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: streamingabci.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &streamingabci.ListenerGRPCPlugin{Impl: &StdoutPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/store/streaming/abci/grpc.go b/store/streaming/abci/grpc.go deleted file mode 100644 index 5984e8f950..0000000000 --- a/store/streaming/abci/grpc.go +++ /dev/null @@ -1,79 +0,0 @@ -package abci - -import ( - "context" - "os" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/hashicorp/go-plugin" - - storetypes "cosmossdk.io/store/types" -) - -var _ storetypes.ABCIListener = (*GRPCClient)(nil) - -// GRPCClient is an implementation of the ABCIListener interface that talks over RPC. -type GRPCClient struct { - client ABCIListenerServiceClient -} - -// ListenEndBlock listens to end block request and responses. -// In addition, it retrieves a types.Context from a context.Context instance. -// It panics if a types.Context was not properly attached. -// When the node is configured to stop on listening errors, -// it will terminate immediately and exit with a non-zero code. -func (m *GRPCClient) ListenFinalizeBlock(goCtx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error { - ctx := goCtx.(storetypes.Context) - sm := ctx.StreamingManager() - request := &ListenFinalizeBlockRequest{Req: &req, Res: &res} - _, err := m.client.ListenFinalizeBlock(goCtx, request) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("FinalizeBlock listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -// ListenCommit listens to commit responses and state changes for the current block. -// In addition, it retrieves a types.Context from a context.Context instance. -// It panics if a types.Context was not properly attached. -// When the node is configured to stop on listening errors, -// it will terminate immediately and exit with a non-zero code. -func (m *GRPCClient) ListenCommit(goCtx context.Context, res abci.ResponseCommit, changeSet []*storetypes.StoreKVPair) error { - ctx := goCtx.(storetypes.Context) - sm := ctx.StreamingManager() - request := &ListenCommitRequest{BlockHeight: ctx.BlockHeight(), Res: &res, ChangeSet: changeSet} - _, err := m.client.ListenCommit(goCtx, request) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("Commit listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -func cleanupAndExit() { - plugin.CleanupClients() - os.Exit(1) -} - -var _ ABCIListenerServiceServer = (*GRPCServer)(nil) - -// GRPCServer is the gRPC server that GRPCClient talks to. 
-type GRPCServer struct { - // This is the real implementation - Impl storetypes.ABCIListener -} - -func (m GRPCServer) ListenFinalizeBlock(ctx context.Context, request *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { - if err := m.Impl.ListenFinalizeBlock(ctx, *request.Req, *request.Res); err != nil { - return nil, err - } - return &ListenFinalizeBlockResponse{}, nil -} - -func (m GRPCServer) ListenCommit(ctx context.Context, request *ListenCommitRequest) (*ListenCommitResponse, error) { - if err := m.Impl.ListenCommit(ctx, *request.Res, request.ChangeSet); err != nil { - return nil, err - } - return &ListenCommitResponse{}, nil -} diff --git a/store/streaming/abci/grpc.pb.go b/store/streaming/abci/grpc.pb.go deleted file mode 100644 index 77ae842ad4..0000000000 --- a/store/streaming/abci/grpc.pb.go +++ /dev/null @@ -1,1047 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/streaming/abci/grpc.proto - -package abci - -import ( - context "context" - types1 "cosmossdk.io/store/types" - fmt "fmt" - types "github.com/cometbft/cometbft/abci/types" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ListenEndBlockRequest is the request type for the ListenEndBlock RPC method -type ListenFinalizeBlockRequest struct { - Req *types.RequestFinalizeBlock `protobuf:"bytes,1,opt,name=req,proto3" json:"req,omitempty"` - Res *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` -} - -func (m *ListenFinalizeBlockRequest) Reset() { *m = ListenFinalizeBlockRequest{} } -func (m *ListenFinalizeBlockRequest) String() string { return proto.CompactTextString(m) } -func (*ListenFinalizeBlockRequest) ProtoMessage() {} -func (*ListenFinalizeBlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{0} -} -func (m *ListenFinalizeBlockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenFinalizeBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenFinalizeBlockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenFinalizeBlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenFinalizeBlockRequest.Merge(m, src) -} -func (m *ListenFinalizeBlockRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenFinalizeBlockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenFinalizeBlockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenFinalizeBlockRequest proto.InternalMessageInfo - -func (m *ListenFinalizeBlockRequest) GetReq() *types.RequestFinalizeBlock { - if m != nil { - return m.Req - } - return nil -} - -func (m *ListenFinalizeBlockRequest) GetRes() *types.ResponseFinalizeBlock { 
- if m != nil { - return m.Res - } - return nil -} - -// ListenEndBlockResponse is the response type for the ListenEndBlock RPC method -type ListenFinalizeBlockResponse struct { -} - -func (m *ListenFinalizeBlockResponse) Reset() { *m = ListenFinalizeBlockResponse{} } -func (m *ListenFinalizeBlockResponse) String() string { return proto.CompactTextString(m) } -func (*ListenFinalizeBlockResponse) ProtoMessage() {} -func (*ListenFinalizeBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{1} -} -func (m *ListenFinalizeBlockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenFinalizeBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenFinalizeBlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenFinalizeBlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenFinalizeBlockResponse.Merge(m, src) -} -func (m *ListenFinalizeBlockResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenFinalizeBlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenFinalizeBlockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenFinalizeBlockResponse proto.InternalMessageInfo - -// ListenCommitRequest is the request type for the ListenCommit RPC method -type ListenCommitRequest struct { - // explicitly pass in block height as ResponseCommit does not contain this info - BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - Res *types.ResponseCommit `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` - ChangeSet []*types1.StoreKVPair `protobuf:"bytes,3,rep,name=change_set,json=changeSet,proto3" json:"change_set,omitempty"` -} - -func (m *ListenCommitRequest) Reset() { *m = ListenCommitRequest{} } -func (m *ListenCommitRequest) String() string { return proto.CompactTextString(m) } -func (*ListenCommitRequest) ProtoMessage() {} -func (*ListenCommitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{2} -} -func (m *ListenCommitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenCommitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenCommitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenCommitRequest.Merge(m, src) -} -func (m *ListenCommitRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenCommitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenCommitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenCommitRequest proto.InternalMessageInfo - -func (m *ListenCommitRequest) GetBlockHeight() int64 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ListenCommitRequest) GetRes() *types.ResponseCommit { - if m != nil { - return m.Res - } - return nil -} - -func (m *ListenCommitRequest) GetChangeSet() []*types1.StoreKVPair { - if m != nil { - return m.ChangeSet - } - return nil -} - -// ListenCommitResponse is the response type for the ListenCommit RPC method -type ListenCommitResponse struct { -} - -func (m *ListenCommitResponse) Reset() { *m = 
ListenCommitResponse{} } -func (m *ListenCommitResponse) String() string { return proto.CompactTextString(m) } -func (*ListenCommitResponse) ProtoMessage() {} -func (*ListenCommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{3} -} -func (m *ListenCommitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenCommitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenCommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenCommitResponse.Merge(m, src) -} -func (m *ListenCommitResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenCommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenCommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenCommitResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ListenFinalizeBlockRequest)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockRequest") - proto.RegisterType((*ListenFinalizeBlockResponse)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockResponse") - proto.RegisterType((*ListenCommitRequest)(nil), "cosmos.store.streaming.abci.ListenCommitRequest") - proto.RegisterType((*ListenCommitResponse)(nil), "cosmos.store.streaming.abci.ListenCommitResponse") -} - -func init() { - proto.RegisterFile("cosmos/store/streaming/abci/grpc.proto", fileDescriptor_7b98083eb9315fb6) -} - -var fileDescriptor_7b98083eb9315fb6 = []byte{ - // 409 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x31, 0x6f, 0xda, 0x40, - 0x14, 0xc7, 0x31, 0x96, 0x2a, 0xf5, 0x60, 0x3a, 0xaa, 0x0a, 0x19, 0xd5, 0x05, 0xab, 0x45, 0x4c, - 0xe7, 0x9a, 0x0e, 0x20, 0x75, 0x69, 0x41, 0xaa, 0x5a, 0xb5, 0x43, 0x05, 0x52, 0x87, 0x2c, 0xc8, - 0x36, 0x4f, 0xe6, 0x04, 0xf6, 0x99, 0xbb, 0x0b, 0x52, 0xf2, 0x09, 0xb2, 0x25, 0x4b, 0x3e, 0x46, - 0xbe, 0x47, 0x46, 0xc6, 0x8c, 0x11, 0x7c, 0x91, 0xc8, 0x77, 0x84, 0x60, 0x05, 0xa2, 0x30, 0xf2, - 0xee, 0xff, 0x7b, 0xef, 0x77, 0xbc, 0x33, 0x6a, 0x86, 0x4c, 0xc4, 0x4c, 0xb8, 0x42, 0x32, 0x0e, - 0xae, 0x90, 0x1c, 0xfc, 0x98, 0x26, 0x91, 0xeb, 0x07, 0x21, 0x75, 0x23, 0x9e, 0x86, 0x24, 0xe5, - 0x4c, 0x32, 0x5c, 0xd3, 0x39, 0xa2, 0x72, 0x64, 0x9b, 0x23, 0x59, 0xce, 0xaa, 0x49, 0x48, 0xc6, - 0xc0, 0x63, 0x9a, 0x48, 0x0d, 0xca, 0xb3, 0x14, 0x84, 0x26, 0xad, 0x4f, 0xb9, 0x09, 0x0b, 0x2f, - 0x00, 0xe9, 0x7b, 0xee, 0x8c, 0x0a, 0x09, 0x49, 0xd6, 0x41, 0xa5, 0x9c, 0x4b, 0x03, 0x59, 0x7f, - 0x55, 0xed, 0x27, 0x4d, 0xfc, 0x19, 0x3d, 0x87, 0xde, 0x8c, 0x85, 0xd3, 0x01, 0xcc, 0x4f, 0x41, - 0x48, 0xdc, 0x41, 0x26, 0x87, 0x79, 0xd5, 0xa8, 0x1b, 0xad, 0x52, 0xfb, 0x33, 0x79, 0x9a, 0xa7, - 0x04, 0xc8, 0x26, 0x96, 0x47, 0x33, 0x02, 0x77, 0x33, 0x50, 0x54, 0x8b, 0x0a, 0x6c, 0xee, 0x01, - 0x45, 0xca, 0x12, 0x01, 0xcf, 0x48, 0xe1, 0x7c, 0x40, 0xb5, 0xbd, 0x42, 0x1a, 0x70, 0x6e, 0x0c, - 0x54, 0xd1, 0xe7, 0x7d, 0x16, 0xc7, 0x54, 0x3e, 0x9a, 0x36, 0x50, 0x39, 0xc8, 0x82, 0xa3, 0x09, - 0xd0, 0x68, 0x22, 0x95, 0xb2, 0x39, 0x28, 0xa9, 0xda, 0x2f, 0x55, 0xc2, 0xde, 0xae, 0xd3, 0xc7, - 0x83, 0x4e, 0x9b, 0xbe, 0x59, 0x16, 0x7f, 0x47, 0x28, 0x9c, 0xf8, 0x49, 0x04, 0x23, 0x01, 0xb2, - 0x6a, 0xd6, 0xcd, 0x56, 0xa9, 0xdd, 0x20, 0xb9, 0x9d, 0x6c, 0xfe, 0x59, 0x32, 0xcc, 0x7e, 0xfd, - 0xf9, 0xff, 0xcf, 0xa7, 0x7c, 0xf0, 0x56, 0x43, 0x43, 0x90, 
0xce, 0x7b, 0xf4, 0x2e, 0xaf, 0xab, - 0x87, 0xb4, 0xaf, 0x8b, 0xa8, 0xf2, 0xa3, 0xd7, 0xff, 0xad, 0x0f, 0x81, 0x0f, 0x81, 0x2f, 0x68, - 0x08, 0xf8, 0x62, 0x7b, 0xbf, 0xdc, 0xfd, 0x71, 0x87, 0xbc, 0xf0, 0x12, 0xc8, 0xe1, 0x15, 0x5a, - 0xdd, 0xe3, 0x41, 0xad, 0x88, 0x05, 0x2a, 0xef, 0xaa, 0xe3, 0x2f, 0xaf, 0xe8, 0x94, 0x5b, 0x8a, - 0xe5, 0x1d, 0x41, 0xe8, 0xa1, 0xbd, 0x6f, 0xb7, 0x2b, 0xdb, 0x58, 0xae, 0x6c, 0xe3, 0x7e, 0x65, - 0x1b, 0x57, 0x6b, 0xbb, 0xb0, 0x5c, 0xdb, 0x85, 0xbb, 0xb5, 0x5d, 0x38, 0x69, 0xe8, 0x5e, 0x62, - 0x3c, 0x25, 0x94, 0xed, 0xfd, 0x70, 0x82, 0x37, 0xea, 0x51, 0x7f, 0x7d, 0x08, 0x00, 0x00, 0xff, - 0xff, 0xa8, 0x04, 0x3e, 0xdb, 0x5e, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ABCIListenerServiceClient is the client API for ABCIListenerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ABCIListenerServiceClient interface { - // ListenFinalizeBlock is the corresponding endpoint for ABCIListener.ListenEndBlock - ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) - // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit - ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) -} - -type aBCIListenerServiceClient struct { - cc grpc1.ClientConn -} - -func NewABCIListenerServiceClient(cc grpc1.ClientConn) ABCIListenerServiceClient { - return &aBCIListenerServiceClient{cc} -} - -func (c *aBCIListenerServiceClient) ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) { - out := new(ListenFinalizeBlockResponse) - err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIListenerServiceClient) ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) { - out := new(ListenCommitResponse) - err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ABCIListenerServiceServer is the server API for ABCIListenerService service. -type ABCIListenerServiceServer interface { - // ListenFinalizeBlock is the corresponding endpoint for ABCIListener.ListenEndBlock - ListenFinalizeBlock(context.Context, *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) - // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit - ListenCommit(context.Context, *ListenCommitRequest) (*ListenCommitResponse, error) -} - -// UnimplementedABCIListenerServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedABCIListenerServiceServer struct { -} - -func (*UnimplementedABCIListenerServiceServer) ListenFinalizeBlock(ctx context.Context, req *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenFinalizeBlock not implemented") -} -func (*UnimplementedABCIListenerServiceServer) ListenCommit(ctx context.Context, req *ListenCommitRequest) (*ListenCommitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenCommit not implemented") -} - -func RegisterABCIListenerServiceServer(s grpc1.Server, srv ABCIListenerServiceServer) { - s.RegisterService(&_ABCIListenerService_serviceDesc, srv) -} - -func _ABCIListenerService_ListenFinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenFinalizeBlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, req.(*ListenFinalizeBlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCIListenerService_ListenCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenCommitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIListenerServiceServer).ListenCommit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIListenerServiceServer).ListenCommit(ctx, req.(*ListenCommitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ABCIListenerService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cosmos.store.streaming.abci.ABCIListenerService", - HandlerType: (*ABCIListenerServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListenFinalizeBlock", - Handler: _ABCIListenerService_ListenFinalizeBlock_Handler, - }, - { - MethodName: "ListenCommit", - Handler: _ABCIListenerService_ListenCommit_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "cosmos/store/streaming/abci/grpc.proto", -} - -func (m *ListenFinalizeBlockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenFinalizeBlockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenFinalizeBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Res != nil { - { - size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Req != nil { - { - size, err := m.Req.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListenFinalizeBlockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenFinalizeBlockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenFinalizeBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ListenCommitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenCommitRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenCommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChangeSet) > 0 { - for iNdEx := len(m.ChangeSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChangeSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Res != nil { - { - size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.BlockHeight != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.BlockHeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ListenCommitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenCommitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenCommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintGrpc(dAtA []byte, offset int, v uint64) int { - offset -= sovGrpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ListenFinalizeBlockRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Req != nil { - l = m.Req.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - if m.Res != nil { - l = m.Res.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - return n -} - -func (m *ListenFinalizeBlockResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ListenCommitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockHeight != 0 { - n += 1 + sovGrpc(uint64(m.BlockHeight)) - } - if m.Res != nil { - l = m.Res.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - if len(m.ChangeSet) > 0 { - for _, e := range m.ChangeSet { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - return n -} - -func (m *ListenCommitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovGrpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGrpc(x uint64) (n int) { - return sovGrpc(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) -} -func (m *ListenFinalizeBlockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenFinalizeBlockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenFinalizeBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Req", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Req == nil { - m.Req = &types.RequestFinalizeBlock{} - } - if err := m.Req.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Res == nil { - m.Res = &types.ResponseFinalizeBlock{} - } - if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenFinalizeBlockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenFinalizeBlockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenFinalizeBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenCommitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenCommitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenCommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) - } - m.BlockHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Res == nil { - m.Res = &types.ResponseCommit{} - } - if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChangeSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChangeSet = append(m.ChangeSet, &types1.StoreKVPair{}) - if err := m.ChangeSet[len(m.ChangeSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenCommitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenCommitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenCommitResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGrpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGrpc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGrpc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGrpc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGrpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGrpc = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/streaming/abci/interface.go b/store/streaming/abci/interface.go deleted file mode 100644 index cecc1b0ad0..0000000000 --- a/store/streaming/abci/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package abci contains shared data between the host and plugins. -package abci - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - storetypes "cosmossdk.io/store/types" -) - -// Handshake is a common handshake that is shared by streaming and host. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. -var Handshake = plugin.HandshakeConfig{ - // This isn't required when using VersionedPlugins - ProtocolVersion: 1, - MagicCookieKey: "ABCI_LISTENER_PLUGIN", - MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345", -} - -var _ plugin.GRPCPlugin = (*ListenerGRPCPlugin)(nil) - -// ListenerGRPCPlugin is the implementation of plugin.GRPCPlugin, so we can serve/consume this. -type ListenerGRPCPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- Impl storetypes.ABCIListener -} - -func (p *ListenerGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - RegisterABCIListenerServiceServer(s, &GRPCServer{Impl: p.Impl}) - return nil -} - -func (p *ListenerGRPCPlugin) GRPCClient( - _ context.Context, - _ *plugin.GRPCBroker, - c *grpc.ClientConn, -) (interface{}, error) { - return &GRPCClient{client: NewABCIListenerServiceClient(c)}, nil -} diff --git a/store/streaming/streaming.go b/store/streaming/streaming.go deleted file mode 100644 index f553fd16a4..0000000000 --- a/store/streaming/streaming.go +++ /dev/null @@ -1,79 +0,0 @@ -package streaming - -import ( - "fmt" - "os" - "os/exec" - "strings" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - streamingabci "cosmossdk.io/store/streaming/abci" -) - -const pluginEnvKeyPrefix = "COSMOS_SDK" - -// HandshakeMap contains a map of each supported streaming's handshake config -var HandshakeMap = map[string]plugin.HandshakeConfig{ - "abci": streamingabci.Handshake, -} - -// PluginMap contains a map of supported gRPC plugins -var PluginMap = map[string]plugin.Plugin{ - "abci": &streamingabci.ListenerGRPCPlugin{}, -} - -func GetPluginEnvKey(name string) string { - return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name)) -} - -func NewStreamingPlugin(name, logLevel string) (interface{}, error) { - logger := hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: toHclogLevel(logLevel), - Name: fmt.Sprintf("plugin.%s", name), - }) - - // We're a host. Start by launching the streaming process. - env := os.Getenv(GetPluginEnvKey(name)) - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: HandshakeMap[name], - Managed: true, - Plugins: PluginMap, - // For verifying the integrity of executables see SecureConfig documentation - // https://pkg.go.dev/github.com/hashicorp/go-plugin#SecureConfig - //#nosec G204 -- Required to load plugins - Cmd: exec.Command("sh", "-c", env), - Logger: logger, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, plugin.ProtocolGRPC, - }, - }) - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - // Request streaming plugin - return rpcClient.Dispense(name) -} - -func toHclogLevel(s string) hclog.Level { - switch s { - case "trace": - return hclog.Trace - case "debug": - return hclog.Debug - case "info": - return hclog.Info - case "warn": - return hclog.Warn - case "error": - return hclog.Error - default: - return hclog.DefaultLevel - } -} diff --git a/store/streaming/streaming_test.go b/store/streaming/streaming_test.go deleted file mode 100644 index e6124838b5..0000000000 --- a/store/streaming/streaming_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "os" - "runtime" - "testing" - "time" - - abci "github.com/cometbft/cometbft/abci/types" - tmproto "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cosmos/gogoproto/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "cosmossdk.io/log" - storetypes "cosmossdk.io/store/types" -) - -type PluginTestSuite struct { - suite.Suite - - loggerCtx MockContext - - workDir string - - finalizeBlockReq abci.RequestFinalizeBlock - finalizeBlockRes abci.ResponseFinalizeBlock - commitRes abci.ResponseCommit - - changeSet []*storetypes.StoreKVPair -} - -func (s *PluginTestSuite) SetupTest() { - if runtime.GOOS != "linux" { - s.T().Skip("only run on linux") - } 
- - path, err := os.Getwd() - if err != nil { - s.T().Fail() - } - s.workDir = path - - pluginVersion := "abci" - // to write data to files, replace stdout/stdout => file/file - pluginPath := fmt.Sprintf("%s/abci/examples/stdout/stdout", s.workDir) - if err := os.Setenv(GetPluginEnvKey(pluginVersion), pluginPath); err != nil { - s.T().Fail() - } - - raw, err := NewStreamingPlugin(pluginVersion, "trace") - require.NoError(s.T(), err, "load", "streaming", "unexpected error") - - abciListener, ok := raw.(storetypes.ABCIListener) - require.True(s.T(), ok, "should pass type check") - - header := tmproto.Header{Height: 1, Time: time.Now()} - logger := log.NewNopLogger() - streamingService := storetypes.StreamingManager{ - ABCIListeners: []storetypes.ABCIListener{abciListener}, - StopNodeOnErr: true, - } - s.loggerCtx = NewMockContext(header, logger, streamingService) - - // test abci message types - - s.finalizeBlockReq = abci.RequestFinalizeBlock{ - Height: s.loggerCtx.BlockHeight(), - Txs: [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}, - Misbehavior: []abci.Misbehavior{}, - Hash: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DecidedLastCommit: abci.CommitInfo{}, - } - s.finalizeBlockRes = abci.ResponseFinalizeBlock{ - Events: []abci.Event{}, - ConsensusParamUpdates: &tmproto.ConsensusParams{}, - ValidatorUpdates: []abci.ValidatorUpdate{}, - TxResults: []*abci.ExecTxResult{{ - Events: []abci.Event{}, - Code: 1, - Codespace: "mockCodeSpace", - Data: []byte{5, 6, 7, 8}, - GasUsed: 2, - GasWanted: 3, - Info: "mockInfo", - Log: "mockLog", - }}, - } - s.commitRes = abci.ResponseCommit{} - - // test store kv pair types - for range [2000]int{} { - s.changeSet = append(s.changeSet, &storetypes.StoreKVPair{ - StoreKey: "mockStore", - Delete: false, - Key: []byte{1, 2, 3}, - Value: []byte{3, 2, 1}, - }) - } -} - -func TestPluginTestSuite(t *testing.T) { - suite.Run(t, new(PluginTestSuite)) -} - -func (s *PluginTestSuite) TestABCIGRPCPlugin() { - s.T().Run("Should successfully load streaming", func(t *testing.T) { - abciListeners := s.loggerCtx.StreamingManager().ABCIListeners - for _, abciListener := range abciListeners { - for i := range [50]int{} { - - err := abciListener.ListenFinalizeBlock(s.loggerCtx, s.finalizeBlockReq, s.finalizeBlockRes) - assert.NoError(t, err, "ListenEndBlock") - - err = abciListener.ListenCommit(s.loggerCtx, s.commitRes, s.changeSet) - assert.NoError(t, err, "ListenCommit") - - s.updateHeight(int64(i + 1)) - } - } - }) -} - -func (s *PluginTestSuite) updateHeight(n int64) { - header := s.loggerCtx.BlockHeader() - header.Height = n - s.loggerCtx = NewMockContext(header, s.loggerCtx.Logger(), s.loggerCtx.StreamingManager()) -} - -var ( - _ context.Context = MockContext{} - _ storetypes.Context = MockContext{} -) - -type MockContext struct { - baseCtx context.Context - header tmproto.Header - logger log.Logger - streamingManager storetypes.StreamingManager -} - -func (m MockContext) BlockHeight() int64 { return m.header.Height } -func (m MockContext) Logger() log.Logger { return m.logger } -func (m MockContext) StreamingManager() storetypes.StreamingManager { return m.streamingManager } - -func (m MockContext) BlockHeader() tmproto.Header { - msg := proto.Clone(&m.header).(*tmproto.Header) - return *msg -} - -func NewMockContext(header tmproto.Header, logger log.Logger, sm storetypes.StreamingManager) MockContext { - header.Time = header.Time.UTC() - return MockContext{ - baseCtx: context.Background(), - header: header, - logger: logger, - streamingManager: sm, - } -} - -func (m 
MockContext) Deadline() (deadline time.Time, ok bool) { - return m.baseCtx.Deadline() -} - -func (m MockContext) Done() <-chan struct{} { - return m.baseCtx.Done() -} - -func (m MockContext) Err() error { - return m.baseCtx.Err() -} - -func (m MockContext) Value(key any) any { - return m.baseCtx.Value(key) -} diff --git a/store/trace.go b/store/trace.go new file mode 100644 index 0000000000..b67aec3472 --- /dev/null +++ b/store/trace.go @@ -0,0 +1,25 @@ +package store + +import "golang.org/x/exp/maps" + +// TraceContext contains KVStore context data. It will be written with every +// trace operation. +type TraceContext map[string]any + +// Clone creates a shallow clone of a TraceContext. +func (tc TraceContext) Clone() TraceContext { + return maps.Clone(tc) +} + +// Merge merges the receiver TraceContext with the provided TraceContext argument. +func (tc TraceContext) Merge(newTc TraceContext) TraceContext { + if tc == nil { + tc = TraceContext{} + } + + for k, v := range newTc { + tc[k] = v + } + + return tc +} diff --git a/store/tracekv/README.md b/store/tracekv/README.md new file mode 100644 index 0000000000..eda74979d8 --- /dev/null +++ b/store/tracekv/README.md @@ -0,0 +1,15 @@ +# tracekv + +The `tracekv.Store` implementation defines a store which wraps a parent `KVStore` +and traces all operations performed on it. Each trace operation is written to a +provided `io.Writer` object. Specifically, a `TraceOperation` object is JSON +encoded and written to the writer. The `TraceOperation` object contains the exact +operation, e.g. a read or write, and the corresponding key and value pair. + +A `tracekv.Store` can also be instantiated with a `store.TraceContext` which +can allow each traced operation to include additional metadata, e.g. a block height +or hash. + +Note, `tracekv.Store` is not meant to be branched or written to. The parent `KVStore` +is responsible for all branching and writing operations, while a `tracekv.Store` +wraps such a store and traces all relevant operations on it. diff --git a/store/tracekv/doc.go b/store/tracekv/doc.go new file mode 100644 index 0000000000..b0d215adc9 --- /dev/null +++ b/store/tracekv/doc.go @@ -0,0 +1,7 @@ +/* +Package tracekv provides a KVStore implementation that wraps a parent KVStore +and allows all operations to be traced to an io.Writer. This can be useful to +serve use cases such as tracing and digesting all read operations for a specific +store key and key or value. 
+*/ +package tracekv diff --git a/store/tracekv/iterator.go b/store/tracekv/iterator.go new file mode 100644 index 0000000000..7ee39d5837 --- /dev/null +++ b/store/tracekv/iterator.go @@ -0,0 +1,57 @@ +package tracekv + +import ( + "io" + + "cosmossdk.io/store/v2" +) + +var _ store.Iterator = (*iterator)(nil) + +type iterator struct { + parent store.Iterator + writer io.Writer + context store.TraceContext +} + +func newIterator(w io.Writer, parent store.Iterator, tc store.TraceContext) store.Iterator { + return &iterator{ + parent: parent, + writer: w, + context: tc, + } +} + +func (itr *iterator) Domain() ([]byte, []byte) { + return itr.parent.Domain() +} + +func (itr *iterator) Valid() bool { + return itr.parent.Valid() +} + +func (itr *iterator) Next() bool { + return itr.parent.Next() +} + +func (itr *iterator) Error() error { + return itr.parent.Error() +} + +func (itr *iterator) Close() { + itr.parent.Close() +} + +func (itr *iterator) Key() []byte { + key := itr.parent.Key() + + writeOperation(itr.writer, IterKeyOp, itr.context, key, nil) + return key +} + +func (itr *iterator) Value() []byte { + value := itr.parent.Value() + + writeOperation(itr.writer, IterValueOp, itr.context, nil, value) + return value +} diff --git a/store/tracekv/store.go b/store/tracekv/store.go index ba6df431da..549cd345f5 100644 --- a/store/tracekv/store.go +++ b/store/tracekv/store.go @@ -5,178 +5,110 @@ import ( "encoding/json" "io" - "cosmossdk.io/errors" - "cosmossdk.io/store/types" + "github.com/cockroachdb/errors" + + "cosmossdk.io/store/v2" ) +// Operation types for tracing KVStore operations. const ( - writeOp operation = "write" - readOp operation = "read" - deleteOp operation = "delete" - iterKeyOp operation = "iterKey" - iterValueOp operation = "iterValue" + WriteOp = "write" + ReadOp = "read" + DeleteOp = "delete" + IterKeyOp = "iterKey" + IterValueOp = "iterValue" ) +var _ store.BranchedKVStore = (*Store)(nil) + type ( - // Store implements the KVStore interface with tracing enabled. - // Operations are traced on each core KVStore call and written to the - // underlying io.writer. - // - // TODO: Should we use a buffered writer and implement Commit on - // Store? + // Store defines a KVStore used for tracing capabilities, which typically wraps + // another KVStore implementation. Store struct { - parent types.KVStore + parent store.KVStore + context store.TraceContext writer io.Writer - context types.TraceContext } - // operation represents an IO operation - operation string - - // traceOperation implements a traced KVStore operation - traceOperation struct { - Operation operation `json:"operation"` - Key string `json:"key"` - Value string `json:"value"` - Metadata map[string]interface{} `json:"metadata"` + // TraceOperation defines a traced KVStore operation, such as a read or write + TraceOperation struct { + Operation string `json:"operation"` + Key string `json:"key"` + Value string `json:"value"` + Metadata map[string]any `json:"metadata"` } ) -// NewStore returns a reference to a new traceKVStore given a parent -// KVStore implementation and a buffered writer. -func NewStore(parent types.KVStore, writer io.Writer, tc types.TraceContext) *Store { - return &Store{parent: parent, writer: writer, context: tc} -} - -// Get implements the KVStore interface. It traces a read operation and -// delegates a Get call to the parent KVStore. 
-func (tkv *Store) Get(key []byte) []byte { - value := tkv.parent.Get(key) - - writeOperation(tkv.writer, readOp, tkv.context, key, value) - return value -} - -// Set implements the KVStore interface. It traces a write operation and -// delegates the Set call to the parent KVStore. -func (tkv *Store) Set(key, value []byte) { - types.AssertValidKey(key) - writeOperation(tkv.writer, writeOp, tkv.context, key, value) - tkv.parent.Set(key, value) -} - -// Delete implements the KVStore interface. It traces a write operation and -// delegates the Delete call to the parent KVStore. -func (tkv *Store) Delete(key []byte) { - writeOperation(tkv.writer, deleteOp, tkv.context, key, nil) - tkv.parent.Delete(key) -} - -// Has implements the KVStore interface. It delegates the Has call to the -// parent KVStore. -func (tkv *Store) Has(key []byte) bool { - return tkv.parent.Has(key) -} - -// Iterator implements the KVStore interface. It delegates the Iterator call -// to the parent KVStore. -func (tkv *Store) Iterator(start, end []byte) types.Iterator { - return tkv.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It delegates the -// ReverseIterator call to the parent KVStore. -func (tkv *Store) ReverseIterator(start, end []byte) types.Iterator { - return tkv.iterator(start, end, false) -} - -// iterator facilitates iteration over a KVStore. It delegates the necessary -// calls to it's parent KVStore. -func (tkv *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator - - if ascending { - parent = tkv.parent.Iterator(start, end) - } else { - parent = tkv.parent.ReverseIterator(start, end) +func New(p store.KVStore, w io.Writer, tc store.TraceContext) store.BranchedKVStore { + return &Store{ + parent: p, + writer: w, + context: tc, } - - return newTraceIterator(tkv.writer, parent, tkv.context) } -type traceIterator struct { - parent types.Iterator - writer io.Writer - context types.TraceContext +func (s *Store) GetStoreKey() string { + return s.parent.GetStoreKey() } -func newTraceIterator(w io.Writer, parent types.Iterator, tc types.TraceContext) types.Iterator { - return &traceIterator{writer: w, parent: parent, context: tc} +func (s *Store) GetStoreType() store.StoreType { + return store.StoreTypeTrace } -// Domain implements the Iterator interface. -func (ti *traceIterator) Domain() (start, end []byte) { - return ti.parent.Domain() +func (s *Store) GetChangeset() *store.Changeset { + return s.parent.GetChangeset() } -// Valid implements the Iterator interface. -func (ti *traceIterator) Valid() bool { - return ti.parent.Valid() -} - -// Next implements the Iterator interface. -func (ti *traceIterator) Next() { - ti.parent.Next() -} - -// Key implements the Iterator interface. -func (ti *traceIterator) Key() []byte { - key := ti.parent.Key() - - writeOperation(ti.writer, iterKeyOp, ti.context, key, nil) - return key -} - -// Value implements the Iterator interface. -func (ti *traceIterator) Value() []byte { - value := ti.parent.Value() - - writeOperation(ti.writer, iterValueOp, ti.context, nil, value) +func (s *Store) Get(key []byte) []byte { + value := s.parent.Get(key) + writeOperation(s.writer, ReadOp, s.context, key, value) return value } -// Close implements the Iterator interface. -func (ti *traceIterator) Close() error { - return ti.parent.Close() +func (s *Store) Has(key []byte) bool { + return s.parent.Has(key) } -// Error delegates the Error call to the parent iterator. 
-func (ti *traceIterator) Error() error { - return ti.parent.Error() +func (s *Store) Set(key, value []byte) { + writeOperation(s.writer, WriteOp, s.context, key, value) + s.parent.Set(key, value) } -// GetStoreType implements the KVStore interface. It returns the underlying -// KVStore type. -func (tkv *Store) GetStoreType() types.StoreType { - return tkv.parent.GetStoreType() +func (s *Store) Delete(key []byte) { + writeOperation(s.writer, DeleteOp, s.context, key, nil) + s.parent.Delete(key) } -// CacheWrap implements the KVStore interface. It panics because a Store -// cannot be branched. -func (tkv *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a TraceKVStore") +func (s *Store) Reset() error { + return s.parent.Reset() } -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be branched. -func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a TraceKVStore") +func (s *Store) Write() { + if b, ok := s.parent.(store.BranchedKVStore); ok { + b.Write() + } +} + +func (s *Store) Branch() store.BranchedKVStore { + panic("cannot call Branch() on tracekv.Store") +} + +func (s *Store) BranchWithTrace(_ io.Writer, _ store.TraceContext) store.BranchedKVStore { + panic("cannot call BranchWithTrace() on tracekv.Store") +} + +func (s *Store) Iterator(start, end []byte) store.Iterator { + return newIterator(s.writer, s.parent.Iterator(start, end), s.context) +} + +func (s *Store) ReverseIterator(start, end []byte) store.Iterator { + return newIterator(s.writer, s.parent.ReverseIterator(start, end), s.context) } // writeOperation writes a KVStore operation to the underlying io.Writer as // JSON-encoded data where the key/value pair is base64 encoded. 
-func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { - traceOp := traceOperation{ +func writeOperation(w io.Writer, op string, tc store.TraceContext, key, value []byte) { + traceOp := TraceOperation{ Operation: op, Key: base64.StdEncoding.EncodeToString(key), Value: base64.StdEncoding.EncodeToString(value), @@ -197,6 +129,6 @@ func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value _, err = io.WriteString(w, "\n") if err != nil { - panic(errors.Wrap(err, "failed to write newline")) + panic(err) } } diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go index 2c42734bae..6f5a9f9145 100644 --- a/store/tracekv/store_test.go +++ b/store/tracekv/store_test.go @@ -6,28 +6,22 @@ import ( "io" "testing" - dbm "github.com/cosmos/cosmos-db" "github.com/stretchr/testify/require" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/prefix" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" + "cosmossdk.io/store/v2" + "cosmossdk.io/store/v2/memkv" + "cosmossdk.io/store/v2/tracekv" ) -func bz(s string) []byte { return []byte(s) } +const storeKey = "storeKey" -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -var kvPairs = []kv.Pair{ - {Key: keyFmt(1), Value: valFmt(1)}, - {Key: keyFmt(2), Value: valFmt(2)}, - {Key: keyFmt(3), Value: valFmt(3)}, +var kvPairs = []store.KVPair{ + {Key: []byte(fmt.Sprintf("key%0.8d", 1)), Value: []byte(fmt.Sprintf("value%0.8d", 1))}, + {Key: []byte(fmt.Sprintf("key%0.8d", 2)), Value: []byte(fmt.Sprintf("value%0.8d", 2))}, + {Key: []byte(fmt.Sprintf("key%0.8d", 3)), Value: []byte(fmt.Sprintf("value%0.8d", 3))}, } -func newTraceKVStore(w io.Writer) *tracekv.Store { +func newTraceKVStore(w io.Writer) store.KVStore { store := newEmptyTraceKVStore(w) for _, kvPair := range kvPairs { @@ -37,11 +31,11 @@ func newTraceKVStore(w io.Writer) *tracekv.Store { return store } -func newEmptyTraceKVStore(w io.Writer) *tracekv.Store { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) +func newEmptyTraceKVStore(w io.Writer) store.KVStore { + memKVStore := memkv.New(storeKey) + tc := store.TraceContext(map[string]any{"blockHeight": 64}) - return tracekv.NewStore(memDB, w, tc) + return tracekv.New(memKVStore, w, tc) } func TestTraceKVStoreGet(t *testing.T) { @@ -67,8 +61,8 @@ func TestTraceKVStoreGet(t *testing.T) { store := newTraceKVStore(&buf) buf.Reset() - value := store.Get(tc.key) + value := store.Get(tc.key) require.Equal(t, tc.expectedValue, value) require.Equal(t, tc.expectedOut, buf.String()) } @@ -209,8 +203,7 @@ func TestTestTraceKVStoreIterator(t *testing.T) { } require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) + require.False(t, iterator.Next()) } func TestTestTraceKVStoreReverseIterator(t *testing.T) { @@ -265,28 +258,10 @@ func TestTestTraceKVStoreReverseIterator(t *testing.T) { } require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestTraceKVStorePrefix(t *testing.T) { - store := newEmptyTraceKVStore(nil) - pStore := prefix.NewStore(store, []byte("trace_prefix")) - require.IsType(t, prefix.Store{}, pStore) + require.False(t, iterator.Next()) } func TestTraceKVStoreGetStoreType(t *testing.T) { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - store := 
newEmptyTraceKVStore(nil) - require.Equal(t, memDB.GetStoreType(), store.GetStoreType()) -} - -func TestTraceKVStoreCacheWrap(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) + traceKVStore := newEmptyTraceKVStore(nil) + require.Equal(t, store.StoreTypeTrace, traceKVStore.GetStoreType()) } diff --git a/store/transient/store.go b/store/transient/store.go deleted file mode 100644 index 6f393279f5..0000000000 --- a/store/transient/store.go +++ /dev/null @@ -1,53 +0,0 @@ -package transient - -import ( - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -var ( - _ types.Committer = (*Store)(nil) - _ types.KVStore = (*Store)(nil) -) - -// Store is a wrapper for a MemDB with Commiter implementation -type Store struct { - dbadapter.Store -} - -// Constructs new MemDB adapter -func NewStore() *Store { - return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}} -} - -// Implements CommitStore -// Commit cleans up Store. -func (ts *Store) Commit() (id types.CommitID) { - ts.Store = dbadapter.Store{DB: dbm.NewMemDB()} - return -} - -func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. -// They must be set on the root commit multi-store. -func (ts *Store) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} - -// Implements CommitStore -func (ts *Store) LastCommitID() types.CommitID { - return types.CommitID{} -} - -func (ts *Store) WorkingHash() []byte { - return []byte{} -} - -// Implements Store. -func (ts *Store) GetStoreType() types.StoreType { - return types.StoreTypeTransient -} diff --git a/store/transient/store_test.go b/store/transient/store_test.go deleted file mode 100644 index 341ef41cc4..0000000000 --- a/store/transient/store_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package transient_test - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/require" - - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/transient" -) - -var k, v = []byte("hello"), []byte("world") - -func TestTransientStore(t *testing.T) { - tstore := transient.NewStore() - - require.Nil(t, tstore.Get(k)) - - tstore.Set(k, v) - - require.Equal(t, v, tstore.Get(k)) - - tstore.Commit() - - require.Nil(t, tstore.Get(k)) - - // no-op - tstore.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)) - - emptyCommitID := tstore.LastCommitID() - require.Equal(t, emptyCommitID.Version, int64(0)) - require.True(t, bytes.Equal(emptyCommitID.Hash, nil)) -} diff --git a/store/tree.go b/store/tree.go new file mode 100644 index 0000000000..6fce611490 --- /dev/null +++ b/store/tree.go @@ -0,0 +1,16 @@ +package store + +import ( + ics23 "github.com/cosmos/ics23/go" +) + +// Tree is an interface for a commitment layer to support multiple backends. 
+type Tree interface { + WriteBatch(cs *Changeset) error + WorkingHash() []byte + GetLatestVersion() uint64 + LoadVersion(targetVersion uint64) error + Commit() ([]byte, error) + GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) + Close() error +} diff --git a/store/types/codec.go b/store/types/codec.go deleted file mode 100644 index 4a5f424873..0000000000 --- a/store/types/codec.go +++ /dev/null @@ -1,89 +0,0 @@ -package types - -import ( - "encoding/binary" - fmt "fmt" - - proto "github.com/cosmos/gogoproto/proto" -) - -// Codec defines a interface needed for the store package to marshal data -type Codec interface { - // Marshal returns binary encoding of v. - Marshal(proto.Message) ([]byte, error) - - // MarshalLengthPrefixed returns binary encoding of v with bytes length prefix. - MarshalLengthPrefixed(proto.Message) ([]byte, error) - - // Unmarshal parses the data encoded with Marshal method and stores the result - // in the value pointed to by v. - Unmarshal(bz []byte, ptr proto.Message) error - - // Unmarshal parses the data encoded with UnmarshalLengthPrefixed method and stores - // the result in the value pointed to by v. - UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error -} - -// ============= TestCodec ============= -// TestCodec defines a codec that utilizes Protobuf for both binary and JSON -// encoding. -type TestCodec struct{} - -var _ Codec = &TestCodec{} - -func NewTestCodec() Codec { - return &TestCodec{} -} - -// Marshal implements BinaryMarshaler.Marshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. For interface please use the codec.MarshalInterface -func (pc *TestCodec) Marshal(o proto.Message) ([]byte, error) { - // Size() check can catch the typed nil value. - if o == nil || proto.Size(o) == 0 { - // return empty bytes instead of nil, because nil has special meaning in places like store.Set - return []byte{}, nil - } - return proto.Marshal(o) -} - -// MarshalLengthPrefixed implements BinaryMarshaler.MarshalLengthPrefixed method. -func (pc *TestCodec) MarshalLengthPrefixed(o proto.Message) ([]byte, error) { - bz, err := pc.Marshal(o) - if err != nil { - return nil, err - } - - var sizeBuf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(sizeBuf[:], uint64(len(bz))) - return append(sizeBuf[:n], bz...), nil -} - -// Unmarshal implements BinaryMarshaler.Unmarshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. For interface please use the codec.UnmarshalInterface -func (pc *TestCodec) Unmarshal(bz []byte, ptr proto.Message) error { - err := proto.Unmarshal(bz, ptr) - if err != nil { - return err - } - - return nil -} - -// UnmarshalLengthPrefixed implements BinaryMarshaler.UnmarshalLengthPrefixed method. 
-func (pc *TestCodec) UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error { - size, n := binary.Uvarint(bz) - if n < 0 { - return fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", n) - } - - if size > uint64(len(bz)-n) { - return fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-n) - } else if size < uint64(len(bz)-n) { - return fmt.Errorf("too many bytes to read; want: %v, got: %v", size, len(bz)-n) - } - - bz = bz[n:] - return proto.Unmarshal(bz, ptr) -} diff --git a/store/types/commit_info.go b/store/types/commit_info.go deleted file mode 100644 index 125111a0c2..0000000000 --- a/store/types/commit_info.go +++ /dev/null @@ -1,53 +0,0 @@ -package types - -import ( - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - - "cosmossdk.io/store/internal/maps" -) - -// GetHash returns the GetHash from the CommitID. -// This is used in CommitInfo.Hash() -// -// When we commit to this in a merkle proof, we create a map of storeInfo.Name -> storeInfo.GetHash() -// and build a merkle proof from that. -// This is then chained with the substore proof, so we prove the root hash from the substore before this -// and need to pass that (unmodified) as the leaf value of the multistore proof. -func (si StoreInfo) GetHash() []byte { - return si.CommitId.Hash -} - -func (ci CommitInfo) toMap() map[string][]byte { - m := make(map[string][]byte, len(ci.StoreInfos)) - for _, storeInfo := range ci.StoreInfos { - m[storeInfo.Name] = storeInfo.GetHash() - } - - return m -} - -// Hash returns the simple merkle root hash of the stores sorted by name. -func (ci CommitInfo) Hash() []byte { - // we need a special case for empty set, as SimpleProofsFromMap requires at least one entry - if len(ci.StoreInfos) == 0 { - return nil - } - - rootHash, _, _ := maps.ProofsFromMap(ci.toMap()) - return rootHash -} - -func (ci CommitInfo) ProofOp(storeName string) cmtprotocrypto.ProofOp { - ret, err := ProofOpFromMap(ci.toMap(), storeName) - if err != nil { - panic(err) - } - return ret -} - -func (ci CommitInfo) CommitID() CommitID { - return CommitID{ - Version: ci.Version, - Hash: ci.Hash(), - } -} diff --git a/store/types/commit_info.pb.go b/store/types/commit_info.pb.go deleted file mode 100644 index 81220a79c2..0000000000 --- a/store/types/commit_info.pb.go +++ /dev/null @@ -1,864 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/v1beta1/commit_info.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" - _ "google.golang.org/protobuf/types/known/timestamppb" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CommitInfo defines commit information used by the multi-store when committing -// a version/height. 
-type CommitInfo struct { - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - StoreInfos []StoreInfo `protobuf:"bytes,2,rep,name=store_infos,json=storeInfos,proto3" json:"store_infos"` - Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` -} - -func (m *CommitInfo) Reset() { *m = CommitInfo{} } -func (m *CommitInfo) String() string { return proto.CompactTextString(m) } -func (*CommitInfo) ProtoMessage() {} -func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{0} -} -func (m *CommitInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitInfo.Merge(m, src) -} -func (m *CommitInfo) XXX_Size() int { - return m.Size() -} -func (m *CommitInfo) XXX_DiscardUnknown() { - xxx_messageInfo_CommitInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitInfo proto.InternalMessageInfo - -func (m *CommitInfo) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *CommitInfo) GetStoreInfos() []StoreInfo { - if m != nil { - return m.StoreInfos - } - return nil -} - -func (m *CommitInfo) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -// StoreInfo defines store-specific commit information. It contains a reference -// between a store name and the commit ID. -type StoreInfo struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CommitId CommitID `protobuf:"bytes,2,opt,name=commit_id,json=commitId,proto3" json:"commit_id"` -} - -func (m *StoreInfo) Reset() { *m = StoreInfo{} } -func (m *StoreInfo) String() string { return proto.CompactTextString(m) } -func (*StoreInfo) ProtoMessage() {} -func (*StoreInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{1} -} -func (m *StoreInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StoreInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StoreInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StoreInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_StoreInfo.Merge(m, src) -} -func (m *StoreInfo) XXX_Size() int { - return m.Size() -} -func (m *StoreInfo) XXX_DiscardUnknown() { - xxx_messageInfo_StoreInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_StoreInfo proto.InternalMessageInfo - -func (m *StoreInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *StoreInfo) GetCommitId() CommitID { - if m != nil { - return m.CommitId - } - return CommitID{} -} - -// CommitID defines the commitment information when a specific store is -// committed. 
-type CommitID struct { - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *CommitID) Reset() { *m = CommitID{} } -func (*CommitID) ProtoMessage() {} -func (*CommitID) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{2} -} -func (m *CommitID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommitID) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitID.Merge(m, src) -} -func (m *CommitID) XXX_Size() int { - return m.Size() -} -func (m *CommitID) XXX_DiscardUnknown() { - xxx_messageInfo_CommitID.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitID proto.InternalMessageInfo - -func (m *CommitID) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *CommitID) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func init() { - proto.RegisterType((*CommitInfo)(nil), "cosmos.store.v1beta1.CommitInfo") - proto.RegisterType((*StoreInfo)(nil), "cosmos.store.v1beta1.StoreInfo") - proto.RegisterType((*CommitID)(nil), "cosmos.store.v1beta1.CommitID") -} - -func init() { - proto.RegisterFile("cosmos/store/v1beta1/commit_info.proto", fileDescriptor_5f8c656cdef8c524) -} - -var fileDescriptor_5f8c656cdef8c524 = []byte{ - // 336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xf2, 0x50, - 0x14, 0xc7, 0x7b, 0xa1, 0xf9, 0x3e, 0x7a, 0x70, 0xba, 0x61, 0x68, 0x18, 0x6e, 0x09, 0x83, 0x61, - 0xba, 0x0d, 0xb8, 0x39, 0x98, 0x58, 0x8d, 0x09, 0x6b, 0x75, 0x72, 0x31, 0x2d, 0x5c, 0x4a, 0xa3, - 0xed, 0x21, 0xdc, 0x2b, 0x89, 0x6f, 0xc1, 0xe8, 0xe8, 0x33, 0xf8, 0x14, 0x8c, 0x8c, 0x4e, 0x6a, - 0xe0, 0x45, 0x4c, 0x4f, 0x5b, 0x5c, 0x88, 0xdb, 0x39, 0xed, 0xef, 0x9c, 0xff, 0xaf, 0xa7, 0x70, - 0x3a, 0x41, 0x9d, 0xa1, 0xf6, 0xb5, 0xc1, 0xa5, 0xf2, 0x57, 0xc3, 0x58, 0x99, 0x68, 0xe8, 0x4f, - 0x30, 0xcb, 0x52, 0xf3, 0x90, 0xe6, 0x33, 0x94, 0x8b, 0x25, 0x1a, 0xe4, 0x9d, 0x92, 0x93, 0xc4, - 0xc9, 0x8a, 0xeb, 0x76, 0x12, 0x4c, 0x90, 0x00, 0xbf, 0xa8, 0x4a, 0xb6, 0xeb, 0x25, 0x88, 0xc9, - 0x93, 0xf2, 0xa9, 0x8b, 0x9f, 0x67, 0xbe, 0x49, 0x33, 0xa5, 0x4d, 0x94, 0x2d, 0x4a, 0xa0, 0xff, - 0xce, 0x00, 0xae, 0x28, 0x62, 0x9c, 0xcf, 0x90, 0xbb, 0xf0, 0x7f, 0xa5, 0x96, 0x3a, 0xc5, 0xdc, - 0x65, 0x3d, 0x36, 0x68, 0x86, 0x75, 0xcb, 0x6f, 0xa0, 0x4d, 0x81, 0x64, 0xa2, 0xdd, 0x46, 0xaf, - 0x39, 0x68, 0x8f, 0x3c, 0x79, 0xcc, 0x45, 0xde, 0x16, 0x5d, 0xb1, 0x2f, 0xb0, 0x37, 0x9f, 0x9e, - 0x15, 0x82, 0xae, 0x1f, 0x68, 0x1e, 0x80, 0x73, 0x70, 0x70, 0x9b, 0x3d, 0x36, 0x68, 0x8f, 0xba, - 0xb2, 0xb4, 0x94, 0xb5, 0xa5, 0xbc, 0xab, 0x89, 0xa0, 0x55, 0x2c, 0x58, 0x7f, 0x79, 0x2c, 0xfc, - 0x1d, 0xeb, 0xc7, 0xe0, 0x1c, 0x22, 0x38, 0x07, 0x3b, 0x8f, 0x32, 0x45, 0xbe, 0x4e, 0x48, 0x35, - 0xbf, 0x04, 0xa7, 0xbe, 0xdb, 0xd4, 0x6d, 0x50, 0x88, 0x38, 0xae, 0x5a, 0x7d, 0xfb, 0x75, 0x65, - 0xda, 0x2a, 0xc7, 0xc6, 0xd3, 0xfe, 0x05, 0xb4, 0xea, 0x77, 0x7f, 0x5c, 0x85, 0x83, 0x3d, 0x8f, - 0xf4, 0x9c, 0x32, 0x4e, 0x42, 0xaa, 0xcf, 0xed, 0xd7, 0x37, 0xcf, 0x0a, 0x46, 0x9b, 0x9d, 0x60, - 0xdb, 0x9d, 0x60, 0xdf, 0x3b, 0xc1, 0xd6, 0x7b, 0x61, 0x6d, 0xf7, 0xc2, 0xfa, 0xd8, 
0x0b, 0xeb, - 0xde, 0x2d, 0x45, 0xf4, 0xf4, 0x51, 0xa6, 0x58, 0xfd, 0x6d, 0xf3, 0xb2, 0x50, 0x3a, 0xfe, 0x47, - 0x07, 0x38, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb7, 0x0d, 0x59, 0x0a, 0x02, 0x00, 0x00, -} - -func (m *CommitInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintCommitInfo(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x1a - if len(m.StoreInfos) > 0 { - for iNdEx := len(m.StoreInfos) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StoreInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommitInfo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Version != 0 { - i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StoreInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StoreInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StoreInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.CommitId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommitInfo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CommitID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommitID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 - } - if m.Version != 0 { - i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintCommitInfo(dAtA []byte, offset int, v uint64) int { - offset -= sovCommitInfo(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CommitInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != 0 { - n += 1 + sovCommitInfo(uint64(m.Version)) - } - if len(m.StoreInfos) > 0 { - for _, e := range m.StoreInfos { - l = e.Size() - n += 1 + l + sovCommitInfo(uint64(l)) - } - } - l = 
github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovCommitInfo(uint64(l)) - return n -} - -func (m *StoreInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCommitInfo(uint64(l)) - } - l = m.CommitId.Size() - n += 1 + l + sovCommitInfo(uint64(l)) - return n -} - -func (m *CommitID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != 0 { - n += 1 + sovCommitInfo(uint64(m.Version)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovCommitInfo(uint64(l)) - } - return n -} - -func sovCommitInfo(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommitInfo(x uint64) (n int) { - return sovCommitInfo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CommitInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoreInfos", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoreInfos = append(m.StoreInfos, StoreInfo{}) - if err := m.StoreInfos[len(m.StoreInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) 
> l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StoreInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StoreInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StoreInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CommitId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CommitId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommitID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommitInfo(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommitInfo - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommitInfo - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommitInfo - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommitInfo = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommitInfo = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommitInfo = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/types/context.go b/store/types/context.go deleted file mode 100644 index 2daccef912..0000000000 --- a/store/types/context.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -import ( - "cosmossdk.io/log" -) - -// Context is an interface used by an App to pass context information -// needed to process store streaming requests. -type Context interface { - BlockHeight() int64 - Logger() log.Logger - StreamingManager() StreamingManager -} diff --git a/store/types/gas.go b/store/types/gas.go deleted file mode 100644 index baceb7ce25..0000000000 --- a/store/types/gas.go +++ /dev/null @@ -1,255 +0,0 @@ -package types - -import ( - "fmt" - "math" -) - -// Gas consumption descriptors. 
-const ( - GasIterNextCostFlatDesc = "IterNextFlat" - GasValuePerByteDesc = "ValuePerByte" - GasWritePerByteDesc = "WritePerByte" - GasReadPerByteDesc = "ReadPerByte" - GasWriteCostFlatDesc = "WriteFlat" - GasReadCostFlatDesc = "ReadFlat" - GasHasDesc = "Has" - GasDeleteDesc = "Delete" -) - -// Gas measured by the SDK -type Gas = uint64 - -// ErrorNegativeGasConsumed defines an error thrown when the amount of gas refunded results in a -// negative gas consumed amount. -type ErrorNegativeGasConsumed struct { - Descriptor string -} - -// ErrorOutOfGas defines an error thrown when an action results in out of gas. -type ErrorOutOfGas struct { - Descriptor string -} - -// ErrorGasOverflow defines an error thrown when an action results gas consumption -// unsigned integer overflow. -type ErrorGasOverflow struct { - Descriptor string -} - -// GasMeter interface to track gas consumption -type GasMeter interface { - GasConsumed() Gas - GasConsumedToLimit() Gas - GasRemaining() Gas - Limit() Gas - ConsumeGas(amount Gas, descriptor string) - RefundGas(amount Gas, descriptor string) - IsPastLimit() bool - IsOutOfGas() bool - String() string -} - -type basicGasMeter struct { - limit Gas - consumed Gas -} - -// NewGasMeter returns a reference to a new basicGasMeter. -func NewGasMeter(limit Gas) GasMeter { - return &basicGasMeter{ - limit: limit, - consumed: 0, - } -} - -// GasConsumed returns the gas consumed from the GasMeter. -func (g *basicGasMeter) GasConsumed() Gas { - return g.consumed -} - -// GasRemaining returns the gas left in the GasMeter. -func (g *basicGasMeter) GasRemaining() Gas { - if g.IsPastLimit() { - return 0 - } - return g.limit - g.consumed -} - -// Limit returns the gas limit of the GasMeter. -func (g *basicGasMeter) Limit() Gas { - return g.limit -} - -// GasConsumedToLimit returns the gas limit if gas consumed is past the limit, -// otherwise it returns the consumed gas. -// -// NOTE: This behavior is only called when recovering from panic when -// BlockGasMeter consumes gas past the limit. -func (g *basicGasMeter) GasConsumedToLimit() Gas { - if g.IsPastLimit() { - return g.limit - } - return g.consumed -} - -// addUint64Overflow performs the addition operation on two uint64 integers and -// returns a boolean on whether or not the result overflows. -func addUint64Overflow(a, b uint64) (uint64, bool) { - if math.MaxUint64-a < b { - return 0, true - } - - return a + b, false -} - -// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit or out of gas. -func (g *basicGasMeter) ConsumeGas(amount Gas, descriptor string) { - var overflow bool - g.consumed, overflow = addUint64Overflow(g.consumed, amount) - if overflow { - g.consumed = math.MaxUint64 - panic(ErrorGasOverflow{descriptor}) - } - - if g.consumed > g.limit { - panic(ErrorOutOfGas{descriptor}) - } -} - -// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the -// gas consumed, the function will panic. -// -// Use case: This functionality enables refunding gas to the transaction or block gas pools so that -// EVM-compatible chains can fully support the go-ethereum StateDb interface. -// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference. -func (g *basicGasMeter) RefundGas(amount Gas, descriptor string) { - if g.consumed < amount { - panic(ErrorNegativeGasConsumed{Descriptor: descriptor}) - } - - g.consumed -= amount -} - -// IsPastLimit returns true if gas consumed is past limit, otherwise it returns false. 
-func (g *basicGasMeter) IsPastLimit() bool { - return g.consumed > g.limit -} - -// IsOutOfGas returns true if gas consumed is greater than or equal to gas limit, otherwise it returns false. -func (g *basicGasMeter) IsOutOfGas() bool { - return g.consumed >= g.limit -} - -// String returns the BasicGasMeter's gas limit and gas consumed. -func (g *basicGasMeter) String() string { - return fmt.Sprintf("BasicGasMeter:\n limit: %d\n consumed: %d", g.limit, g.consumed) -} - -type infiniteGasMeter struct { - consumed Gas -} - -// NewInfiniteGasMeter returns a new gas meter without a limit. -func NewInfiniteGasMeter() GasMeter { - return &infiniteGasMeter{ - consumed: 0, - } -} - -// GasConsumed returns the gas consumed from the GasMeter. -func (g *infiniteGasMeter) GasConsumed() Gas { - return g.consumed -} - -// GasConsumedToLimit returns the gas consumed from the GasMeter since the gas is not confined to a limit. -// NOTE: This behavior is only called when recovering from panic when BlockGasMeter consumes gas past the limit. -func (g *infiniteGasMeter) GasConsumedToLimit() Gas { - return g.consumed -} - -// GasRemaining returns MaxUint64 since limit is not confined in infiniteGasMeter. -func (g *infiniteGasMeter) GasRemaining() Gas { - return math.MaxUint64 -} - -// Limit returns MaxUint64 since limit is not confined in infiniteGasMeter. -func (g *infiniteGasMeter) Limit() Gas { - return math.MaxUint64 -} - -// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit. -func (g *infiniteGasMeter) ConsumeGas(amount Gas, descriptor string) { - var overflow bool - // TODO: Should we set the consumed field after overflow checking? - g.consumed, overflow = addUint64Overflow(g.consumed, amount) - if overflow { - panic(ErrorGasOverflow{descriptor}) - } -} - -// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the -// gas consumed, the function will panic. -// -// Use case: This functionality enables refunding gas to the trasaction or block gas pools so that -// EVM-compatible chains can fully support the go-ethereum StateDb interface. -// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference. -func (g *infiniteGasMeter) RefundGas(amount Gas, descriptor string) { - if g.consumed < amount { - panic(ErrorNegativeGasConsumed{Descriptor: descriptor}) - } - - g.consumed -= amount -} - -// IsPastLimit returns false since the gas limit is not confined. -func (g *infiniteGasMeter) IsPastLimit() bool { - return false -} - -// IsOutOfGas returns false since the gas limit is not confined. -func (g *infiniteGasMeter) IsOutOfGas() bool { - return false -} - -// String returns the InfiniteGasMeter's gas consumed. -func (g *infiniteGasMeter) String() string { - return fmt.Sprintf("InfiniteGasMeter:\n consumed: %d", g.consumed) -} - -// GasConfig defines gas cost for each operation on KVStores -type GasConfig struct { - HasCost Gas - DeleteCost Gas - ReadCostFlat Gas - ReadCostPerByte Gas - WriteCostFlat Gas - WriteCostPerByte Gas - IterNextCostFlat Gas -} - -// KVGasConfig returns a default gas config for KVStores. -func KVGasConfig() GasConfig { - return GasConfig{ - HasCost: 1000, - DeleteCost: 1000, - ReadCostFlat: 1000, - ReadCostPerByte: 3, - WriteCostFlat: 2000, - WriteCostPerByte: 30, - IterNextCostFlat: 30, - } -} - -// TransientGasConfig returns a default gas config for TransientStores. 
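For context on the gas metering API removed above, the following is a minimal usage sketch of NewGasMeter, ConsumeGas, RefundGas and the out-of-gas panic. It assumes the pre-removal import path cosmossdk.io/store/types; the gas amounts and descriptors are arbitrary examples.

```go
package main

import (
	"fmt"

	storetypes "cosmossdk.io/store/types"
)

func main() {
	// A bounded meter panics with ErrorOutOfGas once consumption exceeds its limit.
	meter := storetypes.NewGasMeter(1000)
	meter.ConsumeGas(storetypes.KVGasConfig().ReadCostFlat, storetypes.GasReadCostFlatDesc)
	fmt.Println(meter.GasConsumed(), meter.GasRemaining()) // 1000 0

	// RefundGas deducts from the consumed amount (and panics if asked to refund
	// more than was consumed).
	meter.RefundGas(400, "refund")
	fmt.Println(meter.GasConsumed()) // 600

	// Exceeding the limit panics; recover to observe the failing descriptor.
	func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Printf("out of gas: %#v\n", r)
			}
		}()
		meter.ConsumeGas(500, "too much")
	}()

	// The infinite meter only tracks consumption and never reports out of gas.
	infinite := storetypes.NewInfiniteGasMeter()
	infinite.ConsumeGas(500, "free")
	fmt.Println(infinite.IsOutOfGas()) // false
}
```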
-func TransientGasConfig() GasConfig { - return GasConfig{ - HasCost: 100, - DeleteCost: 100, - ReadCostFlat: 100, - ReadCostPerByte: 0, - WriteCostFlat: 200, - WriteCostPerByte: 3, - IterNextCostFlat: 3, - } -} diff --git a/store/types/gas_test.go b/store/types/gas_test.go deleted file mode 100644 index f4b5a6abe5..0000000000 --- a/store/types/gas_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package types - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestInfiniteGasMeter(t *testing.T) { - t.Parallel() - meter := NewInfiniteGasMeter() - require.Equal(t, uint64(math.MaxUint64), meter.Limit()) - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(0), meter.GasConsumed()) - require.Equal(t, uint64(0), meter.GasConsumedToLimit()) - meter.ConsumeGas(10, "consume 10") - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(10), meter.GasConsumed()) - require.Equal(t, uint64(10), meter.GasConsumedToLimit()) - meter.RefundGas(1, "refund 1") - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(9), meter.GasConsumed()) - require.False(t, meter.IsPastLimit()) - require.False(t, meter.IsOutOfGas()) - meter.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") - require.Panics(t, func() { meter.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) - require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) -} - -func TestGasMeter(t *testing.T) { - t.Parallel() - cases := []struct { - limit Gas - usage []Gas - }{ - {10, []Gas{1, 2, 3, 4}}, - {1000, []Gas{40, 30, 20, 10, 900}}, - {100000, []Gas{99999, 1}}, - {100000000, []Gas{50000000, 40000000, 10000000}}, - {65535, []Gas{32768, 32767}}, - {65536, []Gas{32768, 32767, 1}}, - } - - for tcnum, tc := range cases { - meter := NewGasMeter(tc.limit) - used := uint64(0) - - for unum, usage := range tc.usage { - usage := usage - used += usage - require.NotPanics(t, func() { meter.ConsumeGas(usage, "") }, "Not exceeded limit but panicked. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, used, meter.GasConsumed(), "Gas consumption not match. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, tc.limit-used, meter.GasRemaining(), "Gas left not match. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, used, meter.GasConsumedToLimit(), "Gas consumption (to limit) not match. tc #%d, usage #%d", tcnum, unum) - require.False(t, meter.IsPastLimit(), "Not exceeded limit but got IsPastLimit() true") - if unum < len(tc.usage)-1 { - require.False(t, meter.IsOutOfGas(), "Not yet at limit but got IsOutOfGas() true") - } else { - require.True(t, meter.IsOutOfGas(), "At limit but got IsOutOfGas() false") - } - } - - require.Panics(t, func() { meter.ConsumeGas(1, "") }, "Exceeded but not panicked. 
tc #%d", tcnum) - require.Equal(t, meter.GasConsumedToLimit(), meter.Limit(), "Gas consumption (to limit) not match limit") - require.Equal(t, meter.GasConsumed(), meter.Limit()+1, "Gas consumption not match limit+1") - require.Equal(t, uint64(0), meter.GasRemaining()) - - require.NotPanics(t, func() { meter.RefundGas(1, "refund 1") }) - require.Equal(t, meter.GasConsumed(), meter.Limit(), "Gas consumption not match with limit") - require.Equal(t, uint64(0), meter.GasRemaining()) - require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) - - require.NotPanics(t, func() { meter.RefundGas(meter.GasConsumed(), "refund consumed gas") }) - require.Equal(t, meter.Limit(), meter.GasRemaining()) - - meter2 := NewGasMeter(math.MaxUint64) - require.Equal(t, uint64(math.MaxUint64), meter2.GasRemaining()) - meter2.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") - require.Equal(t, Gas(math.MaxUint64-(math.MaxUint64/2)), meter2.GasRemaining()) - require.Panics(t, func() { meter2.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) - } -} - -func TestAddUint64Overflow(t *testing.T) { - t.Parallel() - testCases := []struct { - a, b uint64 - result uint64 - overflow bool - }{ - {0, 0, 0, false}, - {100, 100, 200, false}, - {math.MaxUint64 / 2, math.MaxUint64/2 + 1, math.MaxUint64, false}, - {math.MaxUint64 / 2, math.MaxUint64/2 + 2, 0, true}, - } - - for i, tc := range testCases { - res, overflow := addUint64Overflow(tc.a, tc.b) - require.Equal( - t, tc.overflow, overflow, - "invalid overflow result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, - ) - require.Equal( - t, tc.result, res, - "invalid uint64 result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, - ) - } -} - -func TestTransientGasConfig(t *testing.T) { - t.Parallel() - config := TransientGasConfig() - require.Equal(t, config, GasConfig{ - HasCost: 100, - DeleteCost: 100, - ReadCostFlat: 100, - ReadCostPerByte: 0, - WriteCostFlat: 200, - WriteCostPerByte: 3, - IterNextCostFlat: 3, - }) -} diff --git a/store/types/iterator.go b/store/types/iterator.go deleted file mode 100644 index a328e87a68..0000000000 --- a/store/types/iterator.go +++ /dev/null @@ -1,60 +0,0 @@ -package types - -import ( - "fmt" -) - -// KVStorePrefixIteratorPaginated returns iterator over items in the selected page. -// Items iterated and skipped in ascending order. -func KVStorePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { - pi := &PaginatedIterator{ - Iterator: KVStorePrefixIterator(kvs, prefix), - page: page, - limit: limit, - } - pi.skip() - return pi -} - -// KVStoreReversePrefixIteratorPaginated returns iterator over items in the selected page. -// Items iterated and skipped in descending order. -func KVStoreReversePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { - pi := &PaginatedIterator{ - Iterator: KVStoreReversePrefixIterator(kvs, prefix), - page: page, - limit: limit, - } - pi.skip() - return pi -} - -// PaginatedIterator is a wrapper around Iterator that iterates over values starting for given page and limit. -type PaginatedIterator struct { - Iterator - - page, limit uint // provided during initialization - iterated uint // incremented in a call to Next -} - -func (pi *PaginatedIterator) skip() { - for i := (pi.page - 1) * pi.limit; i > 0 && pi.Iterator.Valid(); i-- { - pi.Iterator.Next() - } -} - -// Next will panic after limit is reached. 
-func (pi *PaginatedIterator) Next() { - if !pi.Valid() { - panic(fmt.Sprintf("PaginatedIterator reached limit %d", pi.limit)) - } - pi.Iterator.Next() - pi.iterated++ -} - -// Valid if below limit and underlying iterator is valid. -func (pi *PaginatedIterator) Valid() bool { - if pi.iterated >= pi.limit { - return false - } - return pi.Iterator.Valid() -} diff --git a/store/types/iterator_test.go b/store/types/iterator_test.go deleted file mode 100644 index a804b092c8..0000000000 --- a/store/types/iterator_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package types_test - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -func newMemTestKVStore(t *testing.T) types.KVStore { - t.Helper() - db := dbm.NewMemDB() - store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - return store -} - -func TestPaginatedIterator(t *testing.T) { - kvs := newMemTestKVStore(t) - total := 10 - lth := total - 1 - asc := make([][]byte, total) - desc := make([][]byte, total) - // store returns values in lexicographic order (or reverse lex order) - for i := 0; i < total; i++ { - key := []byte{byte(i)} - kvs.Set(key, key) - asc[i] = key - desc[lth-i] = key - } - type testCase struct { - desc string - page, limit uint - result [][]byte - reverse bool - } - for _, tc := range []testCase{ - { - desc: "FirstChunk", - page: 1, - limit: 4, - result: asc[:4], - }, - { - desc: "SecondChunk", - page: 2, - limit: 4, - result: asc[4:8], - }, - { - desc: "ThirdChunkHalf", - page: 3, - limit: 4, - result: asc[8:], - }, - { - desc: "OverLimit", - page: 10, - limit: 10, - result: [][]byte{}, - }, - { - desc: "ZeroLimit", - page: 1, - result: [][]byte{}, - }, - { - desc: "ReverseFirstChunk", - page: 1, - limit: 6, - result: desc[:6], - reverse: true, - }, - { - desc: "ReverseSecondChunk", - page: 2, - limit: 6, - result: desc[6:], - reverse: true, - }, - } { - tc := tc - t.Run(tc.desc, func(t *testing.T) { - var iter types.Iterator - if tc.reverse { - iter = types.KVStoreReversePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) - } else { - iter = types.KVStorePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) - } - defer iter.Close() - - result := [][]byte{} - for ; iter.Valid(); iter.Next() { - result = append(result, iter.Key()) - } - - require.Equal(t, tc.result, result) - require.False(t, iter.Valid()) - }) - } -} - -func TestPaginatedIteratorPanicIfInvalid(t *testing.T) { - kvs := newMemTestKVStore(t) - - iter := types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 1) - defer iter.Close() - require.False(t, iter.Valid()) - require.Panics(t, func() { iter.Next() }) // "iterator is empty" - - kvs.Set([]byte{1}, []byte{}) - - iter = types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 0) - defer iter.Close() - require.False(t, iter.Valid()) - require.Panics(t, func() { iter.Next() }) // "not empty but limit is zero" -} diff --git a/store/types/listening.go b/store/types/listening.go deleted file mode 100644 index 75828793ff..0000000000 --- a/store/types/listening.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -// MemoryListener listens to the state writes and accumulate the records in memory. -type MemoryListener struct { - stateCache []*StoreKVPair -} - -// NewMemoryListener creates a listener that accumulate the state writes in memory. 
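As a rough illustration of the paginated iterator helpers deleted above, the sketch below builds an in-memory IAVL-backed KVStore the same way newMemTestKVStore does, then reads the second page of three keys. Import paths are the pre-removal ones and the key layout is a made-up example.

```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/iavl"
	"cosmossdk.io/store/metrics"
	"cosmossdk.io/store/types"
)

func main() {
	// Build an in-memory IAVL-backed KVStore, mirroring newMemTestKVStore.
	db := dbm.NewMemDB()
	store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("demo"),
		types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	if err != nil {
		panic(err)
	}

	// Ten single-byte keys, stored in lexicographic order.
	for i := byte(0); i < 10; i++ {
		store.Set([]byte{i}, []byte{i})
	}

	// Page 2 with a limit of 3 skips the first three keys and yields keys 3, 4 and 5.
	it := types.KVStorePrefixIteratorPaginated(store, nil, 2, 3)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Println(it.Key())
	}
}
```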
-func NewMemoryListener() *MemoryListener { - return &MemoryListener{} -} - -// OnWrite implements MemoryListener interface -func (fl *MemoryListener) OnWrite(storeKey StoreKey, key, value []byte, delete bool) { - fl.stateCache = append(fl.stateCache, &StoreKVPair{ - StoreKey: storeKey.Name(), - Delete: delete, - Key: key, - Value: value, - }) -} - -// PopStateCache returns the current state caches and set to nil -func (fl *MemoryListener) PopStateCache() []*StoreKVPair { - res := fl.stateCache - fl.stateCache = nil - return res -} diff --git a/store/types/listening.pb.go b/store/types/listening.pb.go deleted file mode 100644 index aab2ad57e3..0000000000 --- a/store/types/listening.pb.go +++ /dev/null @@ -1,784 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/v1beta1/listening.proto - -package types - -import ( - fmt "fmt" - types "github.com/cometbft/cometbft/abci/types" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) -// It optionally includes the StoreKey for the originating KVStore and a Boolean flag to distinguish between Sets and -// Deletes -// -// Since: cosmos-sdk 0.43 -type StoreKVPair struct { - StoreKey string `protobuf:"bytes,1,opt,name=store_key,json=storeKey,proto3" json:"store_key,omitempty"` - Delete bool `protobuf:"varint,2,opt,name=delete,proto3" json:"delete,omitempty"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *StoreKVPair) Reset() { *m = StoreKVPair{} } -func (m *StoreKVPair) String() string { return proto.CompactTextString(m) } -func (*StoreKVPair) ProtoMessage() {} -func (*StoreKVPair) Descriptor() ([]byte, []int) { - return fileDescriptor_b6caeb9d7b7c7c10, []int{0} -} -func (m *StoreKVPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StoreKVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StoreKVPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StoreKVPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_StoreKVPair.Merge(m, src) -} -func (m *StoreKVPair) XXX_Size() int { - return m.Size() -} -func (m *StoreKVPair) XXX_DiscardUnknown() { - xxx_messageInfo_StoreKVPair.DiscardUnknown(m) -} - -var xxx_messageInfo_StoreKVPair proto.InternalMessageInfo - -func (m *StoreKVPair) GetStoreKey() string { - if m != nil { - return m.StoreKey - } - return "" -} - -func (m *StoreKVPair) GetDelete() bool { - if m != nil { - return m.Delete - } - return false -} - -func (m *StoreKVPair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *StoreKVPair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -// BlockMetadata contains all the abci event data of a block 
-// the file streamer dump them into files together with the state changes. -type BlockMetadata struct { - ResponseCommit *types.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` - RequestFinalizeBlock *types.RequestFinalizeBlock `protobuf:"bytes,7,opt,name=request_finalize_block,json=requestFinalizeBlock,proto3" json:"request_finalize_block,omitempty"` - ResponseFinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,8,opt,name=response_finalize_block,json=responseFinalizeBlock,proto3" json:"response_finalize_block,omitempty"` -} - -func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } -func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } -func (*BlockMetadata) ProtoMessage() {} -func (*BlockMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_b6caeb9d7b7c7c10, []int{1} -} -func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockMetadata.Merge(m, src) -} -func (m *BlockMetadata) XXX_Size() int { - return m.Size() -} -func (m *BlockMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_BlockMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo - -func (m *BlockMetadata) GetResponseCommit() *types.ResponseCommit { - if m != nil { - return m.ResponseCommit - } - return nil -} - -func (m *BlockMetadata) GetRequestFinalizeBlock() *types.RequestFinalizeBlock { - if m != nil { - return m.RequestFinalizeBlock - } - return nil -} - -func (m *BlockMetadata) GetResponseFinalizeBlock() *types.ResponseFinalizeBlock { - if m != nil { - return m.ResponseFinalizeBlock - } - return nil -} - -func init() { - proto.RegisterType((*StoreKVPair)(nil), "cosmos.store.v1beta1.StoreKVPair") - proto.RegisterType((*BlockMetadata)(nil), "cosmos.store.v1beta1.BlockMetadata") -} - -func init() { - proto.RegisterFile("cosmos/store/v1beta1/listening.proto", fileDescriptor_b6caeb9d7b7c7c10) -} - -var fileDescriptor_b6caeb9d7b7c7c10 = []byte{ - // 374 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0x8a, 0xd3, 0x40, - 0x18, 0xc5, 0x3b, 0xc9, 0x34, 0xce, 0xce, 0xfa, 0x27, 0x0c, 0x75, 0x0d, 0x2e, 0xc4, 0xb0, 0xa8, - 0xe4, 0x2a, 0x61, 0xd7, 0x37, 0xa8, 0x20, 0x92, 0x22, 0x48, 0x04, 0x2f, 0x14, 0x0c, 0x93, 0xe4, - 0x53, 0xc6, 0x26, 0x99, 0x3a, 0x33, 0x2d, 0xd4, 0xa7, 0xf0, 0x61, 0x7c, 0x08, 0x2f, 0x7b, 0xe9, - 0xa5, 0xb4, 0x2f, 0x22, 0x99, 0x04, 0xa5, 0x52, 0xef, 0x72, 0x0e, 0xbf, 0xef, 0xe4, 0xc0, 0x19, - 0xfa, 0xb8, 0x92, 0xba, 0x95, 0x3a, 0xd5, 0x46, 0x2a, 0x48, 0x37, 0xd7, 0x25, 0x18, 0x7e, 0x9d, - 0x36, 0x42, 0x1b, 0xe8, 0x44, 0xf7, 0x29, 0x59, 0x29, 0x69, 0x24, 0x9b, 0x0d, 0x54, 0x62, 0xa9, - 0x64, 0xa4, 0x1e, 0x5e, 0x1a, 0xe8, 0x6a, 0x50, 0xad, 0xe8, 0x4c, 0xca, 0xcb, 0x4a, 0xa4, 0x66, - 0xbb, 0x02, 0x3d, 0x9c, 0x5c, 0x7d, 0xa6, 0xe7, 0x6f, 0x7a, 0x7a, 0xf1, 0xf6, 0x35, 0x17, 0x8a, - 0x5d, 0xd2, 0x33, 0x7b, 0x5c, 0x2c, 0x61, 0x1b, 0xa0, 0x08, 0xc5, 0x67, 0x39, 0xb1, 0xc6, 0x02, - 0xb6, 0xec, 0x82, 0x7a, 0x35, 0x34, 0x60, 0x20, 0x70, 0x22, 0x14, 0x93, 0x7c, 0x54, 0xcc, 0xa7, - 0x6e, 0x8f, 0xbb, 
0x11, 0x8a, 0x6f, 0xe7, 0xfd, 0x27, 0x9b, 0xd1, 0xe9, 0x86, 0x37, 0x6b, 0x08, - 0xb0, 0xf5, 0x06, 0x71, 0xf5, 0xdd, 0xa1, 0x77, 0xe6, 0x8d, 0xac, 0x96, 0xaf, 0xc0, 0xf0, 0x9a, - 0x1b, 0xce, 0x5e, 0xd2, 0x7b, 0x0a, 0xf4, 0x4a, 0x76, 0x1a, 0x8a, 0x4a, 0xb6, 0xad, 0x30, 0x81, - 0x17, 0xa1, 0xf8, 0xfc, 0xe6, 0x51, 0xf2, 0xb7, 0x74, 0xd2, 0x97, 0x4e, 0xf2, 0x91, 0x7b, 0x6e, - 0xb1, 0xfc, 0xae, 0x3a, 0xd2, 0xec, 0x3d, 0xbd, 0x50, 0xf0, 0x65, 0x0d, 0xda, 0x14, 0x1f, 0x45, - 0xc7, 0x1b, 0xf1, 0x15, 0x8a, 0xb2, 0xff, 0x57, 0x70, 0xcb, 0x06, 0x3e, 0x39, 0x11, 0x68, 0xf1, - 0x17, 0x23, 0x6d, 0x8b, 0xe5, 0x33, 0x75, 0xc2, 0x65, 0x1f, 0xe8, 0x83, 0x3f, 0x35, 0xff, 0x49, - 0x27, 0x36, 0xfd, 0xe9, 0x7f, 0xeb, 0x1e, 0xc7, 0xdf, 0x57, 0xa7, 0xec, 0x0c, 0x13, 0xe4, 0x3b, - 0x19, 0x26, 0x8e, 0xef, 0x66, 0x98, 0xb8, 0x3e, 0xce, 0x30, 0xc1, 0xfe, 0x34, 0xc3, 0x64, 0xea, - 0x7b, 0xf3, 0x9b, 0x1f, 0xfb, 0x10, 0xed, 0xf6, 0x21, 0xfa, 0xb5, 0x0f, 0xd1, 0xb7, 0x43, 0x38, - 0xd9, 0x1d, 0xc2, 0xc9, 0xcf, 0x43, 0x38, 0x79, 0x17, 0x0c, 0x7b, 0xeb, 0x7a, 0x99, 0x08, 0x39, - 0xbe, 0x0d, 0x3b, 0x6e, 0xe9, 0xd9, 0x75, 0x9f, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x68, 0x61, - 0xc9, 0x0c, 0x38, 0x02, 0x00, 0x00, -} - -func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StoreKVPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintListening(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x22 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintListening(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x1a - } - if m.Delete { - i-- - if m.Delete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.StoreKey) > 0 { - i -= len(m.StoreKey) - copy(dAtA[i:], m.StoreKey) - i = encodeVarintListening(dAtA, i, uint64(len(m.StoreKey))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BlockMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResponseFinalizeBlock != nil { - { - size, err := m.ResponseFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.RequestFinalizeBlock != nil { - { - size, err := m.RequestFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.ResponseCommit != nil { - { - size, err := m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} - -func encodeVarintListening(dAtA []byte, offset 
int, v uint64) int { - offset -= sovListening(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StoreKVPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.StoreKey) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - if m.Delete { - n += 2 - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - return n -} - -func (m *BlockMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponseCommit != nil { - l = m.ResponseCommit.Size() - n += 1 + l + sovListening(uint64(l)) - } - if m.RequestFinalizeBlock != nil { - l = m.RequestFinalizeBlock.Size() - n += 1 + l + sovListening(uint64(l)) - } - if m.ResponseFinalizeBlock != nil { - l = m.ResponseFinalizeBlock.Size() - n += 1 + l + sovListening(uint64(l)) - } - return n -} - -func sovListening(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozListening(x uint64) (n int) { - return sovListening(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *StoreKVPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StoreKVPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StoreKVPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoreKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoreKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Delete = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], 
dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipListening(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthListening - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlockMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResponseCommit == nil { - m.ResponseCommit = &types.ResponseCommit{} - } - if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestFinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestFinalizeBlock == nil { - m.RequestFinalizeBlock = &types.RequestFinalizeBlock{} - } - if err := m.RequestFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseFinalizeBlock", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResponseFinalizeBlock == nil { - m.ResponseFinalizeBlock = &types.ResponseFinalizeBlock{} - } - if err := m.ResponseFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipListening(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthListening - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipListening(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthListening - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupListening - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthListening - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthListening = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowListening = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupListening = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/types/listening_test.go b/store/types/listening_test.go deleted file mode 100644 index 034d2a4960..0000000000 --- a/store/types/listening_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewStoreKVPairWriteListener(t *testing.T) { - listener := NewMemoryListener() - require.IsType(t, &MemoryListener{}, listener) -} - -func TestOnWrite(t *testing.T) { - listener := NewMemoryListener() - - testStoreKey := NewKVStoreKey("test_key") - testKey := []byte("testing123") - testValue := []byte("testing321") - - // test set - listener.OnWrite(testStoreKey, testKey, testValue, false) - outputKVPair := listener.PopStateCache()[0] - expectedOutputKVPair := &StoreKVPair{ - Key: testKey, - Value: testValue, - StoreKey: testStoreKey.Name(), - Delete: false, - } - require.EqualValues(t, expectedOutputKVPair, outputKVPair) - - // test 
delete - listener.OnWrite(testStoreKey, testKey, testValue, true) - outputKVPair = listener.PopStateCache()[0] - expectedOutputKVPair = &StoreKVPair{ - Key: testKey, - Value: testValue, - StoreKey: testStoreKey.Name(), - Delete: true, - } - require.EqualValues(t, expectedOutputKVPair, outputKVPair) -} diff --git a/store/types/proof.go b/store/types/proof.go deleted file mode 100644 index b1f4a115ed..0000000000 --- a/store/types/proof.go +++ /dev/null @@ -1,174 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/cometbft/cometbft/crypto/merkle" - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - ics23 "github.com/cosmos/ics23/go" - - errorsmod "cosmossdk.io/errors" - sdkmaps "cosmossdk.io/store/internal/maps" - sdkproofs "cosmossdk.io/store/internal/proofs" -) - -const ( - ProofOpIAVLCommitment = "ics23:iavl" - ProofOpSimpleMerkleCommitment = "ics23:simple" - ProofOpSMTCommitment = "ics23:smt" -) - -// CommitmentOp implements merkle.ProofOperator by wrapping an ics23 CommitmentProof -// It also contains a Key field to determine which key the proof is proving. -// NOTE: CommitmentProof currently can either be ExistenceProof or NonexistenceProof -// -// Type and Spec are classified by the kind of merkle proof it represents allowing -// the code to be reused by more types. Spec is never on the wire, but mapped from type in the code. -type CommitmentOp struct { - Type string - Spec *ics23.ProofSpec - Key []byte - Proof *ics23.CommitmentProof -} - -var _ merkle.ProofOperator = CommitmentOp{} - -func NewIavlCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpIAVLCommitment, - Spec: ics23.IavlSpec, - Key: key, - Proof: proof, - } -} - -func NewSimpleMerkleCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSimpleMerkleCommitment, - Spec: ics23.TendermintSpec, - Key: key, - Proof: proof, - } -} - -func NewSmtCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSMTCommitment, - Spec: ics23.SmtSpec, - Key: key, - Proof: proof, - } -} - -// CommitmentOpDecoder takes a merkle.ProofOp and attempts to decode it into a CommitmentOp ProofOperator -// The proofOp.Data is just a marshaled CommitmentProof. The Key of the CommitmentOp is extracted -// from the unmarshalled proof. -func CommitmentOpDecoder(pop cmtprotocrypto.ProofOp) (merkle.ProofOperator, error) { - var spec *ics23.ProofSpec - switch pop.Type { - case ProofOpIAVLCommitment: - spec = ics23.IavlSpec - case ProofOpSimpleMerkleCommitment: - spec = ics23.TendermintSpec - case ProofOpSMTCommitment: - spec = ics23.SmtSpec - default: - return nil, errorsmod.Wrapf(ErrInvalidProof, "unexpected ProofOp.Type; got %s, want supported ics23 subtypes 'ProofOpSimpleMerkleCommitment', 'ProofOpIAVLCommitment', or 'ProofOpSMTCommitment'", pop.Type) - } - - proof := &ics23.CommitmentProof{} - err := proof.Unmarshal(pop.Data) - if err != nil { - return nil, err - } - - op := CommitmentOp{ - Type: pop.Type, - Key: pop.Key, - Spec: spec, - Proof: proof, - } - return op, nil -} - -func (op CommitmentOp) GetKey() []byte { - return op.Key -} - -// Run takes in a list of arguments and attempts to run the proof op against these arguments -// Returns the root wrapped in [][]byte if the proof op succeeds with given args. If not, -// it will return an error. 
-// -// CommitmentOp will accept args of length 1 or length 0 -// If length 1 args is passed in, then CommitmentOp will attempt to prove the existence of the key -// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof -// If length 0 args is passed in, then CommitmentOp will attempt to prove the absence of the key -// in the CommitmentOp and return the CommitmentRoot of the proof -func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { - // calculate root from proof - root, err := op.Proof.Calculate() - if err != nil { - return nil, errorsmod.Wrapf(ErrInvalidProof, "could not calculate root for proof: %v", err) - } - // Only support an existence proof or nonexistence proof (batch proofs currently unsupported) - switch len(args) { - case 0: - // Args are nil, so we verify the absence of the key. - absent := ics23.VerifyNonMembership(op.Spec, root, op.Proof, op.Key) - if !absent { - return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify absence of key: %s", string(op.Key)) - } - - case 1: - // Args is length 1, verify existence of key with value args[0] - if !ics23.VerifyMembership(op.Spec, root, op.Proof, op.Key, args[0]) { - return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify existence of key %s with given value %x", op.Key, args[0]) - } - default: - return nil, errorsmod.Wrapf(ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args)) - } - - return [][]byte{root}, nil -} - -// ProofOp implements ProofOperator interface and converts a CommitmentOp -// into a merkle.ProofOp format that can later be decoded by CommitmentOpDecoder -// back into a CommitmentOp for proof verification -func (op CommitmentOp) ProofOp() cmtprotocrypto.ProofOp { - bz, err := op.Proof.Marshal() - if err != nil { - panic(err.Error()) - } - return cmtprotocrypto.ProofOp{ - Type: op.Type, - Key: op.Key, - Data: bz, - } -} - -// ProofOpFromMap generates a single proof from a map and converts it to a ProofOp. -func ProofOpFromMap(cmap map[string][]byte, storeName string) (ret cmtprotocrypto.ProofOp, err error) { - _, proofs, _ := sdkmaps.ProofsFromMap(cmap) - - proof := proofs[storeName] - if proof == nil { - err = fmt.Errorf("ProofOp for %s but not registered store name", storeName) - return - } - - // convert merkle.SimpleProof to CommitmentProof - existProof, err := sdkproofs.ConvertExistenceProof(proof, []byte(storeName), cmap[storeName]) - if err != nil { - err = fmt.Errorf("could not convert simple proof to existence proof: %w", err) - return - } - - commitmentProof := &ics23.CommitmentProof{ - Proof: &ics23.CommitmentProof_Exist{ - Exist: existProof, - }, - } - - ret = NewSimpleMerkleCommitmentOp([]byte(storeName), commitmentProof).ProofOp() - return -} diff --git a/store/types/store.go b/store/types/store.go deleted file mode 100644 index 8980179950..0000000000 --- a/store/types/store.go +++ /dev/null @@ -1,534 +0,0 @@ -package types - -import ( - "fmt" - "io" - - "github.com/cometbft/cometbft/proto/tendermint/crypto" - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - snapshottypes "cosmossdk.io/store/snapshots/types" -) - -type Store interface { - GetStoreType() StoreType - CacheWrapper -} - -// something that can persist to disk -type Committer interface { - Commit() CommitID - LastCommitID() CommitID - - // WorkingHash returns the hash of the KVStore's state before commit. 
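The proof helpers deleted above can be exercised roughly as follows: ProofOpFromMap builds a ProofOp over a toy commitment map, CommitmentOpDecoder turns it back into a CommitmentOp, and Run with a single argument verifies membership and returns the root. This is only a sketch; the store names and values are invented, the import path is the pre-removal one, and it assumes the ics23 conversion behaves as in the SDK's own proof round-trip.

```go
package main

import (
	"fmt"

	"cosmossdk.io/store/types"
)

func main() {
	// A toy commitment over two "stores", mirroring how multistore proofs are produced.
	cmap := map[string][]byte{
		"bank":    []byte("bank-root"),
		"staking": []byte("staking-root"),
	}

	// Build a ProofOp for the "bank" entry, then decode it back into a proof operator.
	pop, err := types.ProofOpFromMap(cmap, "bank")
	if err != nil {
		panic(err)
	}
	op, err := types.CommitmentOpDecoder(pop)
	if err != nil {
		panic(err)
	}

	// Run with one argument verifies existence of the key with that value and
	// returns the commitment root on success.
	root, err := op.Run([][]byte{[]byte("bank-root")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified against root %X\n", root[0])
}
```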
- WorkingHash() []byte - - SetPruning(pruningtypes.PruningOptions) - GetPruning() pruningtypes.PruningOptions -} - -// Stores of MultiStore must implement CommitStore. -type CommitStore interface { - Committer - Store -} - -// Queryable allows a Store to expose internal state to the abci.Query -// interface. Multistore can route requests to the proper Store. -// -// This is an optional, but useful extension to any CommitStore -type Queryable interface { - Query(*RequestQuery) (*ResponseQuery, error) -} - -type RequestQuery struct { - Data []byte - Path string - Height int64 - Prove bool -} - -type ResponseQuery struct { - Code uint32 - Log string - Info string - Index int64 - Key []byte - Value []byte - ProofOps *crypto.ProofOps - Height int64 - Codespace string -} - -//---------------------------------------- -// MultiStore - -// StoreUpgrades defines a series of transformations to apply the multistore db upon load -type StoreUpgrades struct { - Added []string `json:"added"` - Renamed []StoreRename `json:"renamed"` - Deleted []string `json:"deleted"` -} - -// StoreRename defines a name change of a sub-store. -// All data previously under a PrefixStore with OldKey will be copied -// to a PrefixStore with NewKey, then deleted from OldKey store. -type StoreRename struct { - OldKey string `json:"old_key"` - NewKey string `json:"new_key"` -} - -// IsAdded returns true if the given key should be added -func (s *StoreUpgrades) IsAdded(key string) bool { - if s == nil { - return false - } - for _, added := range s.Added { - if key == added { - return true - } - } - return false -} - -// IsDeleted returns true if the given key should be deleted -func (s *StoreUpgrades) IsDeleted(key string) bool { - if s == nil { - return false - } - for _, d := range s.Deleted { - if d == key { - return true - } - } - return false -} - -// RenamedFrom returns the oldKey if it was renamed -// Returns "" if it was not renamed -func (s *StoreUpgrades) RenamedFrom(key string) string { - if s == nil { - return "" - } - for _, re := range s.Renamed { - if re.NewKey == key { - return re.OldKey - } - } - return "" -} - -type MultiStore interface { - Store - - // Branches MultiStore into a cached storage object. - // NOTE: Caller should probably not call .Write() on each, but - // call CacheMultiStore.Write(). - CacheMultiStore() CacheMultiStore - - // CacheMultiStoreWithVersion branches the underlying MultiStore where - // each stored is loaded at a specific version (height). - CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) - - // Convenience for fetching substores. - // If the store does not exist, panics. - GetStore(StoreKey) Store - GetKVStore(StoreKey) KVStore - - // TracingEnabled returns if tracing is enabled for the MultiStore. - TracingEnabled() bool - - // SetTracer sets the tracer for the MultiStore that the underlying - // stores will utilize to trace operations. The modified MultiStore is - // returned. - SetTracer(w io.Writer) MultiStore - - // SetTracingContext sets the tracing context for a MultiStore. It is - // implied that the caller should update the context when necessary between - // tracing operations. The modified MultiStore is returned. - SetTracingContext(TraceContext) MultiStore - - // LatestVersion returns the latest version in the store - LatestVersion() int64 -} - -// From MultiStore.CacheMultiStore().... 
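A small sketch of how the StoreUpgrades helpers above are typically used; the store names are illustrative only, and in practice the upgrades are handed to LoadLatestVersionAndUpgrade on the CommitMultiStore defined below.

```go
package main

import (
	"fmt"

	"cosmossdk.io/store/types"
)

func main() {
	// Declare the additions, renames and deletions to apply when loading the multistore.
	upgrades := &types.StoreUpgrades{
		Added:   []string{"circuit"},
		Renamed: []types.StoreRename{{OldKey: "params", NewKey: "consensus"}},
		Deleted: []string{"capability"},
	}

	fmt.Println(upgrades.IsAdded("circuit"))       // true
	fmt.Println(upgrades.IsDeleted("capability"))  // true
	fmt.Println(upgrades.RenamedFrom("consensus")) // "params"
	fmt.Println(upgrades.RenamedFrom("params"))    // "" (lookup is by the new key)
}
```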
-type CacheMultiStore interface { - MultiStore - Write() // Writes operations to underlying KVStore -} - -// CommitMultiStore is an interface for a MultiStore without cache capabilities. -type CommitMultiStore interface { - Committer - MultiStore - snapshottypes.Snapshotter - - // Mount a store of type using the given db. - // If db == nil, the new store will use the CommitMultiStore db. - MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB) - - // Panics on a nil key. - GetCommitStore(key StoreKey) CommitStore - - // Panics on a nil key. - GetCommitKVStore(key StoreKey) CommitKVStore - - // Load the latest persisted version. Called once after all calls to - // Mount*Store() are complete. - LoadLatestVersion() error - - // LoadLatestVersionAndUpgrade will load the latest version, but also - // rename/delete/create sub-store keys, before registering all the keys - // in order to handle breaking formats in migrations - LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades) error - - // LoadVersionAndUpgrade will load the named version, but also - // rename/delete/create sub-store keys, before registering all the keys - // in order to handle breaking formats in migrations - LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades) error - - // Load a specific persisted version. When you load an old version, or when - // the last commit attempt didn't complete, the next commit after loading - // must be idempotent (return the same commit id). Otherwise the behavior is - // undefined. - LoadVersion(ver int64) error - - // Set an inter-block (persistent) cache that maintains a mapping from - // StoreKeys to CommitKVStores. - SetInterBlockCache(MultiStorePersistentCache) - - // SetInitialVersion sets the initial version of the IAVL tree. It is used when - // starting a new chain at an arbitrary height. - SetInitialVersion(version int64) error - - // SetIAVLCacheSize sets the cache size of the IAVL tree. - SetIAVLCacheSize(size int) - - // SetIAVLDisableFastNode enables/disables fastnode feature on iavl. - SetIAVLDisableFastNode(disable bool) - - // RollbackToVersion rollback the db to specific version(height). - RollbackToVersion(version int64) error - - // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey - ListeningEnabled(key StoreKey) bool - - // AddListeners adds a listener for the KVStore belonging to the provided StoreKey - AddListeners(keys []StoreKey) - - // PopStateCache returns the accumulated state change messages from the CommitMultiStore - PopStateCache() []*StoreKVPair - - // SetMetrics sets the metrics for the KVStore - SetMetrics(metrics metrics.StoreMetrics) -} - -//---------subsp------------------------------- -// KVStore - -// BasicKVStore is a simple interface to get/set data -type BasicKVStore interface { - // Get returns nil if key doesn't exist. Panics on nil key. - Get(key []byte) []byte - - // Has checks if a key exists. Panics on nil key. - Has(key []byte) bool - - // Set sets the key. Panics on nil key or value. - Set(key, value []byte) - - // Delete deletes the key. Panics on nil key. - Delete(key []byte) -} - -// KVStore additionally provides iteration and deletion -type KVStore interface { - Store - BasicKVStore - - // Iterator over a domain of keys in ascending order. End is exclusive. - // Start must be less than end, or the Iterator is invalid. - // Iterator must be closed by caller. 
- // To iterate over entire domain, use store.Iterator(nil, nil) - // CONTRACT: No writes may happen within a domain while an iterator exists over it. - // Exceptionally allowed for cachekv.Store, safe to write in the modules. - Iterator(start, end []byte) Iterator - - // Iterator over a domain of keys in descending order. End is exclusive. - // Start must be less than end, or the Iterator is invalid. - // Iterator must be closed by caller. - // CONTRACT: No writes may happen within a domain while an iterator exists over it. - // Exceptionally allowed for cachekv.Store, safe to write in the modules. - ReverseIterator(start, end []byte) Iterator -} - -// Iterator is an alias db's Iterator for convenience. -type Iterator = dbm.Iterator - -// CacheKVStore branches a KVStore and provides read cache functionality. -// After calling .Write() on the CacheKVStore, all previously created -// CacheKVStores on the object expire. -type CacheKVStore interface { - KVStore - - // Writes operations to underlying KVStore - Write() -} - -// CommitKVStore is an interface for MultiStore. -type CommitKVStore interface { - Committer - KVStore -} - -//---------------------------------------- -// CacheWrap - -// CacheWrap is the most appropriate interface for store ephemeral branching and cache. -// For example, IAVLStore.CacheWrap() returns a CacheKVStore. CacheWrap should not return -// a Committer, since Commit ephemeral store make no sense. It can return KVStore, -// HeapStore, SpaceStore, etc. -type CacheWrap interface { - // Write syncs with the underlying store. - Write() - - // CacheWrap recursively wraps again. - CacheWrap() CacheWrap - - // CacheWrapWithTrace recursively wraps again with tracing enabled. - CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap -} - -type CacheWrapper interface { - // CacheWrap branches a store. - CacheWrap() CacheWrap - - // CacheWrapWithTrace branches a store with tracing enabled. - CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap -} - -func (cid CommitID) IsZero() bool { - return cid.Version == 0 && len(cid.Hash) == 0 -} - -func (cid CommitID) String() string { - return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) -} - -//---------------------------------------- -// Store types - -// kind of store -type StoreType int - -const ( - StoreTypeMulti StoreType = iota - StoreTypeDB - StoreTypeIAVL - StoreTypeTransient - StoreTypeMemory - StoreTypeSMT - StoreTypePersistent -) - -func (st StoreType) String() string { - switch st { - case StoreTypeMulti: - return "StoreTypeMulti" - - case StoreTypeDB: - return "StoreTypeDB" - - case StoreTypeIAVL: - return "StoreTypeIAVL" - - case StoreTypeTransient: - return "StoreTypeTransient" - - case StoreTypeMemory: - return "StoreTypeMemory" - - case StoreTypeSMT: - return "StoreTypeSMT" - - case StoreTypePersistent: - return "StoreTypePersistent" - } - - return "unknown store type" -} - -//---------------------------------------- -// Keys for accessing substores - -// StoreKey is a key used to index stores in a MultiStore. -type StoreKey interface { - Name() string - String() string -} - -// CapabilityKey represent the Cosmos SDK keys for object-capability -// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures -type CapabilityKey StoreKey - -// KVStoreKey is used for accessing substores. -// Only the pointer value should ever be used - it functions as a capabilities key. 
-type KVStoreKey struct { - name string -} - -// NewKVStoreKey returns a new pointer to a KVStoreKey. -// Use a pointer so keys don't collide. -func NewKVStoreKey(name string) *KVStoreKey { - if name == "" { - panic("empty key name not allowed") - } - return &KVStoreKey{ - name: name, - } -} - -// NewKVStoreKeys returns a map of new pointers to KVStoreKey's. -// The function will panic if there is a potential conflict in names (see `assertNoPrefix` -// function for more details). -func NewKVStoreKeys(names ...string) map[string]*KVStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*KVStoreKey, len(names)) - for _, n := range names { - keys[n] = NewKVStoreKey(n) - } - - return keys -} - -func (key *KVStoreKey) Name() string { - return key.name -} - -func (key *KVStoreKey) String() string { - return fmt.Sprintf("KVStoreKey{%p, %s}", key, key.name) -} - -// TransientStoreKey is used for indexing transient stores in a MultiStore -type TransientStoreKey struct { - name string -} - -// Constructs new TransientStoreKey -// Must return a pointer according to the ocap principle -func NewTransientStoreKey(name string) *TransientStoreKey { - return &TransientStoreKey{ - name: name, - } -} - -// Implements StoreKey -func (key *TransientStoreKey) Name() string { - return key.name -} - -// Implements StoreKey -func (key *TransientStoreKey) String() string { - return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name) -} - -// MemoryStoreKey defines a typed key to be used with an in-memory KVStore. -type MemoryStoreKey struct { - name string -} - -func NewMemoryStoreKey(name string) *MemoryStoreKey { - return &MemoryStoreKey{name: name} -} - -// Name returns the name of the MemoryStoreKey. -func (key *MemoryStoreKey) Name() string { - return key.name -} - -// String returns a stringified representation of the MemoryStoreKey. -func (key *MemoryStoreKey) String() string { - return fmt.Sprintf("MemoryStoreKey{%p, %s}", key, key.name) -} - -//---------------------------------------- - -// TraceContext contains TraceKVStore context data. It will be written with -// every trace operation. -type TraceContext map[string]interface{} - -// Clone clones tc into another instance of TraceContext. -func (tc TraceContext) Clone() TraceContext { - ret := TraceContext{} - for k, v := range tc { - ret[k] = v - } - - return ret -} - -// Merge merges value of newTc into tc. -func (tc TraceContext) Merge(newTc TraceContext) TraceContext { - if tc == nil { - tc = TraceContext{} - } - - for k, v := range newTc { - tc[k] = v - } - - return tc -} - -// MultiStorePersistentCache defines an interface which provides inter-block -// (persistent) caching capabilities for multiple CommitKVStores based on StoreKeys. -type MultiStorePersistentCache interface { - // Wrap and return the provided CommitKVStore with an inter-block (persistent) - // cache. - GetStoreCache(key StoreKey, store CommitKVStore) CommitKVStore - - // Return the underlying CommitKVStore for a StoreKey. - Unwrap(key StoreKey) CommitKVStore - - // Reset the entire set of internal caches. - Reset() -} - -// StoreWithInitialVersion is a store that can have an arbitrary initial -// version. -type StoreWithInitialVersion interface { - // SetInitialVersion sets the initial version of the IAVL tree. It is used when - // starting a new chain at an arbitrary height. 
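The TraceContext helpers above can be exercised as in this minimal sketch; the keys are arbitrary examples and the import path is the pre-removal one.

```go
package main

import (
	"fmt"

	"cosmossdk.io/store/types"
)

func main() {
	base := types.TraceContext{"blockHeight": 12, "txHash": "ABCD"}

	// Clone copies the map one level deep, so adding to the clone leaves the original intact.
	clone := base.Clone()
	clone["msgIndex"] = 0
	fmt.Println(len(base), len(clone)) // 2 3

	// Merge overlays the argument onto the receiver and returns the result;
	// keys present in both take the value from the argument.
	merged := base.Merge(types.TraceContext{"blockHeight": 13})
	fmt.Println(merged["blockHeight"]) // 13
}
```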
- SetInitialVersion(version int64) -} - -// NewTransientStoreKeys constructs a new map of TransientStoreKey's -// Must return pointers according to the ocap principle -// The function will panic if there is a potential conflict in names -// see `assertNoCommonPrefix` function for more details. -func NewTransientStoreKeys(names ...string) map[string]*TransientStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*TransientStoreKey) - for _, n := range names { - keys[n] = NewTransientStoreKey(n) - } - - return keys -} - -// NewMemoryStoreKeys constructs a new map matching store key names to their -// respective MemoryStoreKey references. -// The function will panic if there is a potential conflict in names (see `assertNoPrefix` -// function for more details). -func NewMemoryStoreKeys(names ...string) map[string]*MemoryStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*MemoryStoreKey) - for _, n := range names { - keys[n] = NewMemoryStoreKey(n) - } - - return keys -} diff --git a/store/types/store_test.go b/store/types/store_test.go deleted file mode 100644 index b6304d131b..0000000000 --- a/store/types/store_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package types - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "gotest.tools/v3/assert" -) - -func TestStoreUpgrades(t *testing.T) { - t.Parallel() - type toAdd struct { - key string - } - type toDelete struct { - key string - delete bool - } - type toRename struct { - newkey string - result string - } - - cases := map[string]struct { - upgrades *StoreUpgrades - expectAdd []toAdd - expectDelete []toDelete - expectRename []toRename - }{ - "empty upgrade": { - expectDelete: []toDelete{{"foo", false}}, - expectRename: []toRename{{"foo", ""}}, - }, - "simple matches": { - upgrades: &StoreUpgrades{ - Deleted: []string{"foo"}, - Renamed: []StoreRename{{"bar", "baz"}}, - }, - expectDelete: []toDelete{{"foo", true}, {"bar", false}, {"baz", false}}, - expectRename: []toRename{{"foo", ""}, {"bar", ""}, {"baz", "bar"}}, - }, - "many data points": { - upgrades: &StoreUpgrades{ - Added: []string{"foo", "bar", "baz"}, - Deleted: []string{"one", "two", "three", "four", "five"}, - Renamed: []StoreRename{{"old", "new"}, {"white", "blue"}, {"black", "orange"}, {"fun", "boring"}}, - }, - expectAdd: []toAdd{{"foo"}, {"bar"}, {"baz"}}, - expectDelete: []toDelete{{"four", true}, {"six", false}, {"baz", false}}, - expectRename: []toRename{{"white", ""}, {"blue", "white"}, {"boring", "fun"}, {"missing", ""}}, - }, - } - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - for _, r := range tc.expectAdd { - assert.Equal(t, tc.upgrades.IsAdded(r.key), true) - } - for _, d := range tc.expectDelete { - assert.Equal(t, tc.upgrades.IsDeleted(d.key), d.delete) - } - for _, r := range tc.expectRename { - assert.Equal(t, tc.upgrades.RenamedFrom(r.newkey), r.result) - } - }) - } -} - -func TestCommitID(t *testing.T) { - t.Parallel() - require.True(t, CommitID{}.IsZero()) - require.False(t, CommitID{Version: int64(1)}.IsZero()) - require.False(t, CommitID{Hash: []byte("x")}.IsZero()) - require.Equal(t, "CommitID{[120 120 120 120]:64}", CommitID{Version: int64(100), Hash: []byte("xxxx")}.String()) -} - -func TestKVStoreKey(t *testing.T) { - t.Parallel() - key := NewKVStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, fmt.Sprintf("KVStoreKey{%p, test}", key), key.String()) -} - -func TestNilKVStoreKey(t *testing.T) { - t.Parallel() - - 
require.Panics(t, func() { - _ = NewKVStoreKey("") - }, "setting an empty key should panic") -} - -func TestTransientStoreKey(t *testing.T) { - t.Parallel() - key := NewTransientStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, fmt.Sprintf("TransientStoreKey{%p, test}", key), key.String()) -} - -func TestMemoryStoreKey(t *testing.T) { - t.Parallel() - key := NewMemoryStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, fmt.Sprintf("MemoryStoreKey{%p, test}", key), key.String()) -} - -func TestTraceContext_Clone(t *testing.T) { - tests := []struct { - name string - tc TraceContext - want TraceContext - }{ - { - "nil TraceContext yields empty TraceContext", - nil, - TraceContext{}, - }, - { - "non-nil TraceContext yields equal TraceContext", - TraceContext{ - "value": 42, - }, - TraceContext{ - "value": 42, - }, - }, - { - "non-nil TraceContext yields equal TraceContext, for more than one key", - TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - }, - TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.want, tt.tc.Clone()) - }) - } -} - -func TestTraceContext_Clone_is_deep(t *testing.T) { - original := TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - } - - clone := original.Clone() - - clone["other"] = true - - require.NotEqual(t, original, clone) -} - -func TestTraceContext_Merge(t *testing.T) { - tests := []struct { - name string - tc TraceContext - other TraceContext - want TraceContext - }{ - { - "tc is nil, other is empty, yields an empty TraceContext", - nil, - TraceContext{}, - TraceContext{}, - }, - { - "tc is nil, other is nil, yields an empty TraceContext", - nil, - nil, - TraceContext{}, - }, - { - "tc is not nil, other is nil, yields tc", - TraceContext{ - "data": 42, - }, - nil, - TraceContext{ - "data": 42, - }, - }, - { - "tc is not nil, other is not nil, yields tc + other", - TraceContext{ - "data": 42, - }, - TraceContext{ - "data2": 42, - }, - TraceContext{ - "data": 42, - "data2": 42, - }, - }, - { - "tc is not nil, other is not nil, other updates value in tc, yields tc updated with value from other", - TraceContext{ - "data": 42, - }, - TraceContext{ - "data": 24, - }, - TraceContext{ - "data": 24, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.want, tt.tc.Merge(tt.other)) - }) - } -} - -func TestNewTransientStoreKeys(t *testing.T) { - assert.DeepEqual(t, map[string]*TransientStoreKey{}, NewTransientStoreKeys()) - assert.DeepEqual(t, 1, len(NewTransientStoreKeys("one"))) -} - -func TestNewInfiniteGasMeter(t *testing.T) { - gm := NewInfiniteGasMeter() - require.NotNil(t, gm) -} - -func TestStoreTypes(t *testing.T) { - assert.DeepEqual(t, InclusiveEndBytes([]byte("endbytes")), InclusiveEndBytes([]byte("endbytes"))) -} diff --git a/store/types/streaming.go b/store/types/streaming.go deleted file mode 100644 index 68a5c92234..0000000000 --- a/store/types/streaming.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - "context" - - abci "github.com/cometbft/cometbft/abci/types" -) - -// ABCIListener is the interface that we're exposing as a streaming service. -// It hooks into the ABCI message processing of the BaseApp. 
-// The error results are propagated to consensus state machine, -// if you don't want to affect consensus, handle the errors internally and always return `nil` in these APIs. -type ABCIListener interface { - // ListenFinalizeBlock updates the streaming service with the latest FinalizeBlock messages - ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error - // ListenCommit updates the steaming service with the latest Commit messages and state changes - ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*StoreKVPair) error -} - -// StreamingManager is the struct that maintains a list of ABCIListeners and configuration settings. -type StreamingManager struct { - // ABCIListeners for hooking into the ABCI message processing of the BaseApp - // and exposing the requests and responses to external consumers - ABCIListeners []ABCIListener - - // StopNodeOnErr halts the node when ABCI streaming service listening results in an error. - StopNodeOnErr bool -} diff --git a/store/types/utils.go b/store/types/utils.go deleted file mode 100644 index a54d2746f7..0000000000 --- a/store/types/utils.go +++ /dev/null @@ -1,94 +0,0 @@ -package types - -import ( - "encoding/binary" - "fmt" - "sort" - "strings" -) - -// KVStorePrefixIterator iterates over all the keys with a certain prefix in ascending order -func KVStorePrefixIterator(kvs KVStore, prefix []byte) Iterator { - return kvs.Iterator(prefix, PrefixEndBytes(prefix)) -} - -// KVStoreReversePrefixIterator iterates over all the keys with a certain prefix in descending order. -func KVStoreReversePrefixIterator(kvs KVStore, prefix []byte) Iterator { - return kvs.ReverseIterator(prefix, PrefixEndBytes(prefix)) -} - -// PrefixEndBytes returns the []byte that would end a -// range query for all []byte with a certain prefix -// Deals with last byte of prefix being FF without overflowing -func PrefixEndBytes(prefix []byte) []byte { - if len(prefix) == 0 { - return nil - } - - end := make([]byte, len(prefix)) - copy(end, prefix) - - for { - if end[len(end)-1] != byte(255) { - end[len(end)-1]++ - break - } - - end = end[:len(end)-1] - - if len(end) == 0 { - end = nil - break - } - } - - return end -} - -// InclusiveEndBytes returns the []byte that would end a -// range query such that the input would be included -func InclusiveEndBytes(inclusiveBytes []byte) []byte { - return append(inclusiveBytes, byte(0x00)) -} - -// assertNoCommonPrefix will panic if there are two keys: k1 and k2 in keys, such that -// k1 is a prefix of k2 -func assertNoCommonPrefix(keys []string) { - sorted := make([]string, len(keys)) - copy(sorted, keys) - sort.Strings(sorted) - for i := 1; i < len(sorted); i++ { - if strings.HasPrefix(sorted[i], sorted[i-1]) { - panic(fmt.Sprint("Potential key collision between KVStores:", sorted[i], " - ", sorted[i-1])) - } - } -} - -// Uint64ToBigEndian - marshals uint64 to a bigendian byte slice so it can be sorted -func Uint64ToBigEndian(i uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, i) - return b -} - -// BigEndianToUint64 returns an uint64 from big endian encoded bytes. If encoding -// is empty, zero is returned. -func BigEndianToUint64(bz []byte) uint64 { - if len(bz) == 0 { - return 0 - } - - return binary.BigEndian.Uint64(bz) -} - -// SliceContains implements a generic function for checking if a slice contains -// a certain value. 
-func SliceContains[T comparable](elements []T, v T) bool { - for _, s := range elements { - if v == s { - return true - } - } - - return false -} diff --git a/store/types/utils_test.go b/store/types/utils_test.go deleted file mode 100644 index d05d9df6d6..0000000000 --- a/store/types/utils_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package types_test - -import ( - "testing" - - "gotest.tools/v3/assert" - - "cosmossdk.io/store/types" -) - -func TestPrefixEndBytes(t *testing.T) { - t.Parallel() - testCases := []struct { - prefix []byte - expected []byte - }{ - {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, - {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, - {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, - {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, - {[]byte{byte(255), byte(255), byte(255)}, nil}, - {[]byte{byte(255)}, nil}, - {nil, nil}, - } - - for _, test := range testCases { - end := types.PrefixEndBytes(test.prefix) - assert.DeepEqual(t, test.expected, end) - } -} - -func TestInclusiveEndBytes(t *testing.T) { - t.Parallel() - assert.DeepEqual(t, []byte{0x00}, types.InclusiveEndBytes(nil)) - bs := []byte("test") - assert.DeepEqual(t, append(bs, byte(0x00)), types.InclusiveEndBytes(bs)) -} diff --git a/store/types/validity_test.go b/store/types/validity_test.go deleted file mode 100644 index 56e6791364..0000000000 --- a/store/types/validity_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestAssertValidKey(t *testing.T) { - t.Parallel() - require.NotPanics(t, func() { types.AssertValidKey([]byte{0x01}) }) - require.Panics(t, func() { types.AssertValidKey([]byte{}) }) - require.Panics(t, func() { types.AssertValidKey(nil) }) -} - -func TestAssertValidValue(t *testing.T) { - t.Parallel() - require.NotPanics(t, func() { types.AssertValidValue([]byte{}) }) - require.NotPanics(t, func() { types.AssertValidValue([]byte{0x01}) }) - require.Panics(t, func() { types.AssertValidValue(nil) }) -} diff --git a/store/types/validity.go b/store/validation.go similarity index 64% rename from store/types/validity.go rename to store/validation.go index a1fbaba999..e1bfcfa78b 100644 --- a/store/types/validity.go +++ b/store/validation.go @@ -1,13 +1,15 @@ -package types +package store var ( // 128K - 1 MaxKeyLength = (1 << 17) - 1 + // 2G - 1 MaxValueLength = (1 << 31) - 1 ) -// AssertValidKey checks if the key is valid(key is not nil, not empty and within length limit) +// AssertValidKey checks if the key is valid, i.e. key is not nil, not empty and +// within length limit. func AssertValidKey(key []byte) { if len(key) == 0 { panic("key is nil or empty") @@ -17,7 +19,8 @@ func AssertValidKey(key []byte) { } } -// AssertValidValue checks if the value is valid(value is not nil and within length limit) +// AssertValidValue checks if the value is valid, i.e. value is not nil and +// within length limit. 
func AssertValidValue(value []byte) { if value == nil { panic("value is nil") diff --git a/tests/go.mod b/tests/go.mod index b5c2e53298..b36b4f81ca 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -129,7 +129,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/tests/go.sum b/tests/go.sum index 800706bf68..0830a8fce2 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -748,8 +748,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/tests/starship/tests/go.mod b/tests/starship/tests/go.mod index 9e2c20eb9d..cd5d5e5ea1 100644 --- a/tests/starship/tests/go.mod +++ b/tests/starship/tests/go.mod @@ -144,7 +144,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/tests/starship/tests/go.sum b/tests/starship/tests/go.sum index b8a8e6266c..cca63c78a8 100644 --- a/tests/starship/tests/go.sum +++ b/tests/starship/tests/go.sum @@ -751,8 +751,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/circuit/go.mod b/x/circuit/go.mod index 19ece435ce..f29adae445 100644 --- a/x/circuit/go.mod +++ b/x/circuit/go.mod @@ -96,7 +96,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect diff --git a/x/circuit/go.sum b/x/circuit/go.sum index f1ce1effa5..6f43701079 100644 --- a/x/circuit/go.sum +++ b/x/circuit/go.sum @@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/evidence/go.mod b/x/evidence/go.mod index 3c38e9852a..27fb1aeae5 100644 --- a/x/evidence/go.mod +++ b/x/evidence/go.mod @@ -99,7 +99,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect diff --git a/x/evidence/go.sum b/x/evidence/go.sum index f1ce1effa5..6f43701079 100644 --- a/x/evidence/go.sum +++ b/x/evidence/go.sum @@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 
h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/feegrant/go.mod b/x/feegrant/go.mod index bced5d11c9..87c0f9435c 100644 --- a/x/feegrant/go.mod +++ b/x/feegrant/go.mod @@ -100,7 +100,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/x/feegrant/go.sum b/x/feegrant/go.sum index f08b29146d..b64ae45fd2 100644 --- a/x/feegrant/go.sum +++ b/x/feegrant/go.sum @@ -531,8 +531,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/nft/go.mod b/x/nft/go.mod index 00db073845..93d6edef83 100644 --- a/x/nft/go.mod +++ b/x/nft/go.mod @@ -96,7 +96,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect diff --git a/x/nft/go.sum b/x/nft/go.sum index f1ce1effa5..6f43701079 100644 --- a/x/nft/go.sum +++ b/x/nft/go.sum @@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/params/go.mod 
b/x/params/go.mod index 669e304c50..93389a21bb 100644 --- a/x/params/go.mod +++ b/x/params/go.mod @@ -97,7 +97,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect diff --git a/x/params/go.sum b/x/params/go.sum index f1ce1effa5..6f43701079 100644 --- a/x/params/go.sum +++ b/x/params/go.sum @@ -527,8 +527,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/protocolpool/go.mod b/x/protocolpool/go.mod index b0a26c773d..21bfa7f4bb 100644 --- a/x/protocolpool/go.mod +++ b/x/protocolpool/go.mod @@ -98,7 +98,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/x/protocolpool/go.sum b/x/protocolpool/go.sum index 44ef26a41f..7c286caf37 100644 --- a/x/protocolpool/go.sum +++ b/x/protocolpool/go.sum @@ -529,8 +529,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= diff --git a/x/upgrade/go.mod b/x/upgrade/go.mod index 
f189978a0e..7c0ae22bda 100644 --- a/x/upgrade/go.mod +++ b/x/upgrade/go.mod @@ -118,7 +118,7 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lib/pq v1.10.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.8.0 // indirect + github.com/linxGnu/grocksdb v1.8.4 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect diff --git a/x/upgrade/go.sum b/x/upgrade/go.sum index e2ccbe6d58..549a383bd4 100644 --- a/x/upgrade/go.sum +++ b/x/upgrade/go.sum @@ -748,8 +748,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linxGnu/grocksdb v1.8.0 h1:H4L/LhP7GOMf1j17oQAElHgVlbEje2h14A8Tz9cM2BE= -github.com/linxGnu/grocksdb v1.8.0/go.mod h1:09CeBborffXhXdNpEcOeZrLKEnRtrZFEpFdPNI9Zjjg= +github.com/linxGnu/grocksdb v1.8.4 h1:ZMsBpPpJNtRLHiKKp0mI7gW+NT4s7UgfD5xHxx1jVRo= +github.com/linxGnu/grocksdb v1.8.4/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
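
Reviewer note (not part of the patch): the rename of store/types/validity.go to store/validation.go moves AssertValidKey and AssertValidValue into the root store package. Below is a minimal sketch of how the relocated helpers would be exercised after this change; the cosmossdk.io/store import path is an assumption based on the module layout seen in the deleted tests, and the limits quoted come from the MaxKeyLength/MaxValueLength constants in the hunk above.

package main

import (
	"fmt"

	"cosmossdk.io/store" // assumed import path for the relocated validation helpers
)

func main() {
	// Keys are capped at 128K-1 bytes, values at 2G-1 bytes.
	fmt.Println("max key:", store.MaxKeyLength, "max value:", store.MaxValueLength)

	store.AssertValidKey([]byte("balances")) // ok: non-empty and within the limit
	store.AssertValidValue([]byte{})         // ok: empty (non-nil) values are allowed

	defer func() { fmt.Println("recovered:", recover()) }()
	store.AssertValidKey(nil) // panics: "key is nil or empty"
}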
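
Reviewer note (not part of the patch): store/types/utils.go is deleted above, and PrefixEndBytes is its least obvious piece. The following is a standalone re-statement of that carry logic for illustration only; prefixEnd is a hypothetical local helper, not the SDK API, and its outputs match the cases in the deleted TestPrefixEndBytes.

package main

import "fmt"

// prefixEnd mirrors the PrefixEndBytes logic removed in this diff: it returns the
// exclusive upper bound for a prefix range query, carrying past trailing 0xFF bytes
// and returning nil when the prefix is empty or consists only of 0xFF bytes.
func prefixEnd(prefix []byte) []byte {
	if len(prefix) == 0 {
		return nil
	}
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for {
		if end[len(end)-1] != 0xFF {
			end[len(end)-1]++
			return end
		}
		end = end[:len(end)-1]
		if len(end) == 0 {
			return nil
		}
	}
}

func main() {
	fmt.Println(prefixEnd([]byte{55, 200, 255})) // [55 201], as in the deleted test case
	fmt.Println(prefixEnd([]byte{255, 255}))     // prints [], i.e. nil: no finite upper bound
}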
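
Reviewer note (not part of the patch): the deleted NewKVStoreKeys, NewTransientStoreKeys and NewMemoryStoreKeys constructors all guard against one store key name being a prefix of another via assertNoCommonPrefix. A small re-statement of that check, illustration only (noCommonPrefix is a hypothetical helper, not the SDK function), to show which name sets the constructors would reject by panicking.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// noCommonPrefix reports whether no key name is a prefix of another,
// mirroring the assertNoCommonPrefix guard deleted in this diff.
func noCommonPrefix(keys []string) bool {
	sorted := append([]string(nil), keys...)
	sort.Strings(sorted)
	for i := 1; i < len(sorted); i++ {
		if strings.HasPrefix(sorted[i], sorted[i-1]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(noCommonPrefix([]string{"bank", "staking"})) // true: safe to register together
	fmt.Println(noCommonPrefix([]string{"bank", "bankx"}))   // false: "bank" prefixes "bankx", constructors would panic
}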