diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 813453c5fb..35755a276d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,8 +14,6 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: DeterminateSystems/nix-installer-action@main - - uses: DeterminateSystems/magic-nix-cache-action@main - uses: actions/setup-go@v5 with: go-version: "1.22.2" @@ -31,9 +29,7 @@ jobs: if: env.GIT_DIFF id: lint_long run: | - nix develop -c make lint - env: - NIX: 1 + make lint - uses: technote-space/get-diff-action@v6.1.2 if: steps.lint_long.outcome == 'skipped' with: @@ -43,8 +39,7 @@ jobs: - name: run linting (short) if: steps.lint_long.outcome == 'skipped' && env.GIT_DIFF run: | - nix develop -c make lint + make lint env: GIT_DIFF: ${{ env.GIT_DIFF }} LINT_DIFF: 1 - NIX: 1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9deffe53d6..24ecb62423 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -740,74 +740,6 @@ jobs: with: projectBaseDir: tools/hubl/ - test-store: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: DeterminateSystems/nix-installer-action@main - - uses: DeterminateSystems/magic-nix-cache-action@main - - uses: actions/setup-go@v5 - with: - go-version: "1.20" - check-latest: true - cache: true - cache-dependency-path: store/go.sum - - uses: technote-space/get-diff-action@v6.1.2 - id: git_diff - with: - PATTERNS: | - store/**/*.go - store/go.mod - store/go.sum - - name: tests - if: env.GIT_DIFF - run: | - cd store - nix develop .. -c go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb' ./... - - name: sonarcloud - if: ${{ env.GIT_DIFF && !github.event.pull_request.draft && env.SONAR_TOKEN != null }} - uses: SonarSource/sonarcloud-github-action@master - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - with: - projectBaseDir: store/ - - test-store-v2: - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v4 - - uses: DeterminateSystems/nix-installer-action@main - - uses: DeterminateSystems/magic-nix-cache-action@main - - uses: actions/setup-go@v5 - with: - go-version: "1.22" - check-latest: true - cache: true - cache-dependency-path: store/v2/go.sum - - uses: technote-space/get-diff-action@v6.1.2 - id: git_diff - with: - PATTERNS: | - store/v2/**/*.go - store/v2/go.mod - store/v2/go.sum - - name: test & coverage report creation - if: env.GIT_DIFF - run: | - cd store/v2 - nix develop .. -c go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb' ./... 
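The removed store test jobs select test files through Go build tags (`norace ledger test_ledger_mock rocksdb`). As a minimal sketch of the mechanism those `-tags` flags toggle, assuming a hypothetical tagged test file (package and test names are illustrative, not from this repository):

```go
//go:build rocksdb

package store_test

import "testing"

// This file is compiled, and its tests run, only when `go test` is
// invoked with a tag list containing rocksdb, as in the CI step above.
func TestRocksDBBackend(t *testing.T) {
	t.Log("rocksdb-tagged tests enabled")
}
```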
-      - name: sonarcloud
-        if: ${{ env.GIT_DIFF && !github.event.pull_request.draft && env.SONAR_TOKEN != null }}
-        uses: SonarSource/sonarcloud-github-action@master
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-        with:
-          projectBaseDir: store/v2/
-
   test-log:
     runs-on: ubuntu-latest
     steps:
diff --git a/.github/workflows/v2-test.yml b/.github/workflows/v2-test.yml
index 9bde9c3f39..2fdb518c80 100644
--- a/.github/workflows/v2-test.yml
+++ b/.github/workflows/v2-test.yml
@@ -14,78 +14,6 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  server-v2:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: "1.22"
-          check-latest: true
-          cache: true
-          cache-dependency-path: go.sum
-      - uses: technote-space/get-diff-action@v6.1.2
-        id: git_diff
-        with:
-          PATTERNS: |
-            server/v2/*.go
-            server/v2/go.mod
-            server/v2/go.sum
-            server/v2/testdata/*.toml
-      - name: test & coverage report creation
-        if: env.GIT_DIFF
-        run: |
-          cd server/v2 && go test -mod=readonly -race -timeout 30m -tags='ledger test_ledger_mock'
-  stf:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: "1.22"
-          check-latest: true
-          cache: true
-          cache-dependency-path: go.sum
-      - uses: technote-space/get-diff-action@v6.1.2
-        id: git_diff
-        with:
-          PATTERNS: |
-            server/v2/stf/**/*.go
-            server/v2/stf/go.mod
-            server/v2/stf/go.sum
-      - name: test & coverage report creation
-        if: env.GIT_DIFF
-        run: |
-          cd server/v2/stf && go test -mod=readonly -race -timeout 30m -tags='ledger test_ledger_mock'
-
-  appmanager:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: "1.22"
-          check-latest: true
-          cache: true
-          cache-dependency-path: go.sum
-      - uses: technote-space/get-diff-action@v6.1.2
-        id: git_diff
-        with:
-          PATTERNS: |
-            server/v2/appmanager/**/*.go
-            server/v2/appmanager/go.mod
-            server/v2/appmanager/go.sum
-      - name: test & coverage report creation
-        if: env.GIT_DIFF
-        run: |
-          cd server/v2/appmanager && go test -mod=readonly -race -timeout 30m -tags='ledger test_ledger_mock'
-
   cometbft:
     runs-on: ubuntu-latest
     strategy:
diff --git a/client/v2/autocli/flag/coin.go b/client/v2/autocli/flag/coin.go
index 6ed842a34a..f317d85857 100644
--- a/client/v2/autocli/flag/coin.go
+++ b/client/v2/autocli/flag/coin.go
@@ -2,7 +2,7 @@ package flag
 
 import (
 	"context"
-	"fmt"
+	"errors"
 	"strings"
 
 	"google.golang.org/protobuf/reflect/protoreflect"
@@ -38,7 +38,7 @@ func (c *coinValue) String() string {
 
 func (c *coinValue) Set(stringValue string) error {
 	if strings.Contains(stringValue, ",") {
-		return fmt.Errorf("coin flag must be a single coin, specific multiple coins with multiple flags or spaces")
+		return errors.New("coin flag must be a single coin, specify multiple coins with multiple flags or spaces")
 	}
 
 	coin, err := coins.ParseCoin(stringValue)
diff --git a/client/v2/autocli/testdata/msg-output.golden b/client/v2/autocli/testdata/msg-output.golden
index 8e075ccb14..b08520d200 100644
--- a/client/v2/autocli/testdata/msg-output.golden
+++ b/client/v2/autocli/testdata/msg-output.golden
@@ -1 +1 @@
-{"body":{"messages":[{"@type":"/cosmos.bank.v1beta1.MsgSend","from_address":"cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk","to_address":"cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk","amount":[{"denom":"foo","amount":"1"}]}],"timeout_timestamp":"1970-01-01T00:00:00Z"},"auth_info":{"fee":{"gas_limit":"200000"}}} \ No newline at end of file +{"body":{"messages":[{"@type":"/cosmos.bank.v1beta1.MsgSend","from_address":"cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk","to_address":"cosmos1y74p8wyy4enfhfn342njve6cjmj5c8dtl6emdk","amount":[{"denom":"foo","amount":"1"}]}],"memo":"","timeout_height":"0","unordered":false,"timeout_timestamp":"1970-01-01T00:00:00Z","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""},"tip":null},"signatures":[]} \ No newline at end of file diff --git a/client/v2/go.mod b/client/v2/go.mod index c876fdd94c..3b5099004b 100644 --- a/client/v2/go.mod +++ b/client/v2/go.mod @@ -10,7 +10,7 @@ require ( cosmossdk.io/x/gov v0.0.0-20231113122742-912390d5fc4a cosmossdk.io/x/tx v0.13.3 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 google.golang.org/grpc v1.64.1 @@ -26,6 +26,7 @@ require ( cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/log v1.3.1 // indirect cosmossdk.io/math v1.3.0 + cosmossdk.io/schema v0.1.1 // indirect cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // indirect cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 cosmossdk.io/x/consensus v0.0.0-00010101000000-000000000000 // indirect @@ -119,6 +120,7 @@ require ( github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect github.com/oklog/run v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect @@ -170,11 +172,6 @@ require ( pgregory.net/rapid v1.1.0 // indirect ) -require ( - cosmossdk.io/schema v0.1.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect -) - replace github.com/cosmos/cosmos-sdk => ./../../ // TODO remove post spinning out all modules @@ -182,7 +179,7 @@ replace ( cosmossdk.io/api => ./../../api cosmossdk.io/core => ./../../core cosmossdk.io/core/testing => ../../core/testing - cosmossdk.io/store => ./../../store + cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 // main cosmossdk.io/x/accounts => ./../../x/accounts cosmossdk.io/x/auth => ./../../x/auth cosmossdk.io/x/bank => ./../../x/bank diff --git a/client/v2/go.sum b/client/v2/go.sum index dc867c1802..2331b7ca69 100644 --- a/client/v2/go.sum +++ b/client/v2/go.sum @@ -16,6 +16,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 h1:lhyOHcIJU+IB6i5sO36DWC2r4QXDEk/bsno7jrTr28k= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6/go.mod h1:CY8wAToETz/dmuuKwf/qfXEImtey4jWdWWcoavfQWNw= filippo.io/edwards25519 v1.1.0 
h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= diff --git a/client/v2/internal/coins/format.go b/client/v2/internal/coins/format.go index 1c84fe9bea..c447040c24 100644 --- a/client/v2/internal/coins/format.go +++ b/client/v2/internal/coins/format.go @@ -1,7 +1,7 @@ package coins import ( - "fmt" + "errors" "regexp" "strings" @@ -19,13 +19,13 @@ func ParseCoin(input string) (*basev1beta1.Coin, error) { input = strings.TrimSpace(input) if input == "" { - return nil, fmt.Errorf("empty input when parsing coin") + return nil, errors.New("empty input when parsing coin") } matches := coinRegex.FindStringSubmatch(input) if len(matches) == 0 { - return nil, fmt.Errorf("invalid input format") + return nil, errors.New("invalid input format") } return &basev1beta1.Coin{ diff --git a/client/v2/internal/prompt/validation.go b/client/v2/internal/prompt/validation.go index 8a6e5a2d33..d914999f21 100644 --- a/client/v2/internal/prompt/validation.go +++ b/client/v2/internal/prompt/validation.go @@ -1,6 +1,7 @@ package prompt import ( + "errors" "fmt" "net/url" @@ -10,7 +11,7 @@ import ( // ValidatePromptNotEmpty validates that the input is not empty. func ValidatePromptNotEmpty(input string) error { if input == "" { - return fmt.Errorf("input cannot be empty") + return errors.New("input cannot be empty") } return nil diff --git a/client/v2/internal/testpb/msg.proto b/client/v2/internal/testpb/msg.proto index a1360175d0..9f4c69c539 100644 --- a/client/v2/internal/testpb/msg.proto +++ b/client/v2/internal/testpb/msg.proto @@ -16,7 +16,7 @@ service Msg { }; rpc Clawback(MsgClawbackRequest) returns (MsgClawbackResponse) { - option (cosmos_proto.method_added_in) = "cosmos-sdk v0.51.0"; + option (cosmos_proto.method_added_in) = "cosmos-sdk v0.52.0 "; } } @@ -59,4 +59,4 @@ message MsgResponse { message MsgClawbackRequest {} -message MsgClawbackResponse {} \ No newline at end of file +message MsgClawbackResponse {} diff --git a/client/v2/internal/testpb/msg.pulsar.go b/client/v2/internal/testpb/msg.pulsar.go index 29fd89007a..79979ababa 100644 --- a/client/v2/internal/testpb/msg.pulsar.go +++ b/client/v2/internal/testpb/msg.pulsar.go @@ -4287,27 +4287,27 @@ var file_testpb_msg_proto_rawDesc = []byte{ 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, 0x12, 0x4d, 0x73, 0x67, 0x43, 0x6c, 0x61, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x15, 0x0a, 0x13, 0x4d, 0x73, 0x67, 0x43, 0x6c, 0x61, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xab, 0x01, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x47, 0x0a, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xac, 0x01, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x47, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, 0x12, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0x2e, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0x2e, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0xca, 0xb4, 0x2d, 0x12, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x20, 0x76, - 0x30, 0x2e, 0x35, 0x30, 0x2e, 0x30, 0x12, 0x5b, 0x0a, 0x08, 0x43, 0x6c, 0x61, 0x77, 0x62, 0x61, + 0x30, 0x2e, 0x35, 0x30, 0x2e, 0x30, 0x12, 0x5c, 0x0a, 0x08, 0x43, 0x6c, 0x61, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x1a, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0x2e, 
0x4d, 0x73, 0x67, 0x43, 0x6c, 0x61, 0x77, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0x2e, 0x4d, 0x73, 0x67, 0x43, 0x6c, 0x61, 0x77, 0x62, - 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0xca, 0xb4, 0x2d, - 0x12, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x20, 0x76, 0x30, 0x2e, 0x35, - 0x31, 0x2e, 0x30, 0x42, 0x86, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x70, 0x62, 0x42, 0x08, 0x4d, 0x73, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, - 0x73, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x32, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, - 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x54, 0x58, 0x58, 0xaa, 0x02, 0x06, 0x54, - 0x65, 0x73, 0x74, 0x70, 0x62, 0xca, 0x02, 0x06, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0xe2, 0x02, - 0x12, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0xea, 0x02, 0x06, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0xca, 0xb4, 0x2d, + 0x13, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x20, 0x76, 0x30, 0x2e, 0x35, + 0x32, 0x2e, 0x30, 0x20, 0x42, 0x86, 0x01, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x70, 0x62, 0x42, 0x08, 0x4d, 0x73, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x73, 0x6d, + 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x32, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x74, 0x65, 0x73, 0x74, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x54, 0x58, 0x58, 0xaa, 0x02, 0x06, + 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0xca, 0x02, 0x06, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0xe2, + 0x02, 0x12, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x06, 0x54, 0x65, 0x73, 0x74, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/client/v2/offchain/verify.go b/client/v2/offchain/verify.go index 303a086022..8b9580d632 100644 --- a/client/v2/offchain/verify.go +++ b/client/v2/offchain/verify.go @@ -123,7 +123,7 @@ func verifySignature( return err } if !pubKey.VerifySignature(signBytes, data.Signature) { - return fmt.Errorf("unable to verify single signer signature") + return errors.New("unable to verify single signer signature") } return nil default: diff --git a/contrib/images/simd-env/Dockerfile b/contrib/images/simd-env/Dockerfile index a61aca7c5f..07cfa091fd 100644 --- a/contrib/images/simd-env/Dockerfile +++ b/contrib/images/simd-env/Dockerfile @@ -9,7 +9,6 @@ COPY api/go.mod api/go.sum /work/api/ COPY log/go.mod log/go.sum /work/log/ COPY core/go.mod core/go.sum /work/core/ COPY collections/go.mod collections/go.sum /work/collections/ -COPY store/go.mod store/go.sum /work/store/ COPY depinject/go.mod depinject/go.sum /work/depinject/ COPY x/tx/go.mod x/tx/go.sum /work/x/tx/ COPY x/protocolpool/go.mod x/protocolpool/go.sum /work/x/protocolpool/ @@ -24,9 +23,6 @@ COPY x/mint/go.mod x/mint/go.sum /work/x/mint/ 
COPY x/consensus/go.mod x/consensus/go.sum /work/x/consensus/
 COPY x/accounts/go.mod x/accounts/go.sum /work/x/accounts/
 COPY runtime/v2/go.mod runtime/v2/go.sum /work/runtime/v2/
-COPY server/v2/go.mod server/v2/go.sum /work/server/v2/
-COPY server/v2/appmanager/go.mod server/v2/appmanager/go.sum /work/server/v2/appmanager/
-COPY server/v2/stf/go.mod server/v2/stf/go.sum /work/server/v2/stf/
 COPY server/v2/cometbft/go.mod server/v2/cometbft/go.sum /work/server/v2/cometbft/
 COPY core/testing/go.mod core/testing/go.sum /work/core/testing/
diff --git a/go.mod b/go.mod
index 217ad6e2ee..717f5fecad 100644
--- a/go.mod
+++ b/go.mod
@@ -188,7 +188,7 @@ replace (
 	cosmossdk.io/collections => ./collections
 	cosmossdk.io/core => ./core
 	cosmossdk.io/core/testing => ./core/testing
-	cosmossdk.io/store => ./store
+	cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 // main
 	cosmossdk.io/x/accounts => ./x/accounts
 	cosmossdk.io/x/auth => ./x/auth
 	cosmossdk.io/x/bank => ./x/bank
diff --git a/go.sum b/go.sum
index c63d7cbaf7..420fbb6f0d 100644
--- a/go.sum
+++ b/go.sum
@@ -14,6 +14,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE=
 cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k=
 cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA=
 cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ=
+cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 h1:lhyOHcIJU+IB6i5sO36DWC2r4QXDEk/bsno7jrTr28k=
+cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6/go.mod h1:CY8wAToETz/dmuuKwf/qfXEImtey4jWdWWcoavfQWNw=
 filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
 filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
 github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
diff --git a/go.work.example b/go.work.example
index 035cbb3e34..daab3b444f 100644
--- a/go.work.example
+++ b/go.work.example
@@ -16,11 +16,6 @@ use (
 	./simapp
 	./tests
 	./tests/systemtests
-	./schema
-	./server/v2/stf
-	./server/v2/appmanager
-	./store
-	./store/v2
 	./runtime/v2
 	./tools/cosmovisor
 	./tools/confix
diff --git a/indexer/postgres/CHANGELOG.md b/indexer/postgres/CHANGELOG.md
deleted file mode 100644
index 0c3c9d0385..0000000000
--- a/indexer/postgres/CHANGELOG.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-# Changelog
-
-## [Unreleased]
diff --git a/indexer/postgres/README.md b/indexer/postgres/README.md
deleted file mode 100644
index bb8c480f66..0000000000
--- a/indexer/postgres/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# PostgreSQL Indexer
-
-The PostgreSQL indexer can fully index the current state for all modules that implement `cosmossdk.io/schema.HasModuleCodec`.
-
-## Table, Column and Enum Naming
-
-`ObjectType` names are converted to table names prefixed with the module name and an underscore, e.g. the `ObjectType` `foo` in module `bar` will be stored in a table named `bar_foo`.
-
-Column names are identical to field names. All identifiers are quoted with double quotes so that they are case-sensitive and won't clash with any reserved names.
-
-Like table names, enum types are prefixed with the module name and an underscore.
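To make the prefixing rule above concrete, here is a small, self-contained sketch; it mirrors the `TableName` and `enumTypeName` helpers that appear in the deleted Go sources later in this diff:

```go
package main

import "fmt"

// prefixed scopes a table or enum type name to its module:
// module name, then an underscore, then the name itself.
func prefixed(moduleName, name string) string {
	return fmt.Sprintf("%s_%s", moduleName, name)
}

func main() {
	fmt.Println(prefixed("bar", "foo"))      // bar_foo (table for ObjectType foo in module bar)
	fmt.Println(prefixed("test", "my_enum")) // test_my_enum (enum type)
}
```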
- -## Schema Type Mapping - -The mapping of `cosmossdk.io/schema` `Kind`s to PostgreSQL types is as follows: - -| Kind | PostgreSQL Type | Notes | -|---------------------|----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `StringKind` | `TEXT` | | -| `BoolKind` | `BOOLEAN` | | -| `BytesKind` | `BYTEA` | | -| `Int8Kind` | `SMALLINT` | | -| `Int16Kind` | `SMALLINT` | | -| `Int32Kind` | `INTEGER` | | -| `Int64Kind` | `BIGINT` | | -| `Uint8Kind` | `SMALLINT` | | -| `Uint16Kind` | `INTEGER` | | -| `Uint32Kind` | `BIGINT` | | -| `Uint64Kind` | `NUMERIC` | | -| `Float32Kind` | `REAL` | | -| `Float64Kind` | `DOUBLE PRECISION` | | -| `IntegerStringKind` | `NUMERIC` | | -| `DecimalStringKind` | `NUMERIC` | | -| `JSONKind` | `JSONB` | | -| `Bech32AddressKind` | `TEXT` | addresses are converted to strings with the specified address prefix | -| `TimeKind` | `BIGINT` and `TIMESTAMPTZ` | time types are stored as two columns, one with the `_nanos` suffix with full nanoseconds precision, and another as a `TIMESTAMPTZ` generated column with microsecond precision | -| `DurationKind` | `BIGINT` | durations are stored as a single column in nanoseconds | -| `EnumKind` | `_` | a custom enum type is created for each module prefixed with the module name it pertains to | - - diff --git a/indexer/postgres/base_sql.go b/indexer/postgres/base_sql.go deleted file mode 100644 index 81e1ac7042..0000000000 --- a/indexer/postgres/base_sql.go +++ /dev/null @@ -1,8 +0,0 @@ -package postgres - -// BaseSQL is the base SQL that is always included in the schema. -const BaseSQL = ` -CREATE OR REPLACE FUNCTION nanos_to_timestamptz(nanos bigint) RETURNS timestamptz AS $$ - SELECT to_timestamp(nanos / 1000000000) + (nanos / 1000000000) * INTERVAL '1 microsecond' -$$ LANGUAGE SQL IMMUTABLE; -` diff --git a/indexer/postgres/column.go b/indexer/postgres/column.go deleted file mode 100644 index f9692af137..0000000000 --- a/indexer/postgres/column.go +++ /dev/null @@ -1,120 +0,0 @@ -package postgres - -import ( - "fmt" - "io" - - "cosmossdk.io/schema" -) - -// createColumnDefinition writes a column definition within a CREATE TABLE statement for the field. 
-func (tm *ObjectIndexer) createColumnDefinition(writer io.Writer, field schema.Field) error { - _, err := fmt.Fprintf(writer, "%q ", field.Name) - if err != nil { - return err - } - - simple := simpleColumnType(field.Kind) - if simple != "" { - _, err = fmt.Fprintf(writer, "%s", simple) - if err != nil { - return err - } - - return writeNullability(writer, field.Nullable) - } else { - switch field.Kind { - case schema.EnumKind: - _, err = fmt.Fprintf(writer, "%q", enumTypeName(tm.moduleName, field.EnumDefinition)) - if err != nil { - return err - } - case schema.TimeKind: - // for time fields, we generate two columns: - // - one with nanoseconds precision for lossless storage, suffixed with _nanos - // - one as a timestamptz (microsecond precision) for ease of use, that is GENERATED - nanosColName := fmt.Sprintf("%s_nanos", field.Name) - _, err = fmt.Fprintf(writer, "TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz(%q)) STORED,\n\t", nanosColName) - if err != nil { - return err - } - - _, err = fmt.Fprintf(writer, `%q BIGINT`, nanosColName) - if err != nil { - return err - } - default: - return fmt.Errorf("unexpected kind: %v, this should have been handled earlier", field.Kind) - } - - return writeNullability(writer, field.Nullable) - } -} - -// writeNullability writes column nullability. -func writeNullability(writer io.Writer, nullable bool) error { - if nullable { - _, err := fmt.Fprintf(writer, " NULL,\n\t") - return err - } else { - _, err := fmt.Fprintf(writer, " NOT NULL,\n\t") - return err - } -} - -// simpleColumnType returns the postgres column type for the kind for simple types. -func simpleColumnType(kind schema.Kind) string { - //nolint:goconst // adding constants for these postgres type names would impede readability - switch kind { - case schema.StringKind: - return "TEXT" - case schema.BoolKind: - return "BOOLEAN" - case schema.BytesKind: - return "BYTEA" - case schema.Int8Kind: - return "SMALLINT" - case schema.Int16Kind: - return "SMALLINT" - case schema.Int32Kind: - return "INTEGER" - case schema.Int64Kind: - return "BIGINT" - case schema.Uint8Kind: - return "SMALLINT" - case schema.Uint16Kind: - return "INTEGER" - case schema.Uint32Kind: - return "BIGINT" - case schema.Uint64Kind: - return "NUMERIC" - case schema.IntegerStringKind: - return "NUMERIC" - case schema.DecimalStringKind: - return "NUMERIC" - case schema.Float32Kind: - return "REAL" - case schema.Float64Kind: - return "DOUBLE PRECISION" - case schema.JSONKind: - return "JSONB" - case schema.DurationKind: - return "BIGINT" - case schema.Bech32AddressKind: - return "TEXT" - default: - return "" - } -} - -// updatableColumnName is the name of the insertable/updatable column name for the field. -// This is the field name in most cases, except for time columns which are stored as nanos -// and then converted to timestamp generated columns. -func (tm *ObjectIndexer) updatableColumnName(field schema.Field) (name string, err error) { - name = field.Name - if field.Kind == schema.TimeKind { - name = fmt.Sprintf("%s_nanos", name) - } - name = fmt.Sprintf("%q", name) - return -} diff --git a/indexer/postgres/conn.go b/indexer/postgres/conn.go deleted file mode 100644 index de8c1cac6b..0000000000 --- a/indexer/postgres/conn.go +++ /dev/null @@ -1,14 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" -) - -// DBConn is an interface that abstracts the *sql.DB, *sql.Tx and *sql.Conn types. 
-type DBConn interface { - ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) - PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) - QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row -} diff --git a/indexer/postgres/create_table.go b/indexer/postgres/create_table.go deleted file mode 100644 index 8f5f0e6ca2..0000000000 --- a/indexer/postgres/create_table.go +++ /dev/null @@ -1,96 +0,0 @@ -package postgres - -import ( - "context" - "fmt" - "io" - "strings" -) - -// CreateTable creates the table for the object type. -func (tm *ObjectIndexer) CreateTable(ctx context.Context, conn DBConn) error { - buf := new(strings.Builder) - err := tm.CreateTableSql(buf) - if err != nil { - return err - } - - sqlStr := buf.String() - if tm.options.Logger != nil { - tm.options.Logger(fmt.Sprintf("Creating table %s", tm.TableName()), sqlStr) - } - _, err = conn.ExecContext(ctx, sqlStr) - return err -} - -// CreateTableSql generates a CREATE TABLE statement for the object type. -func (tm *ObjectIndexer) CreateTableSql(writer io.Writer) error { - _, err := fmt.Fprintf(writer, "CREATE TABLE IF NOT EXISTS %q (\n\t", tm.TableName()) - if err != nil { - return err - } - isSingleton := false - if len(tm.typ.KeyFields) == 0 { - isSingleton = true - _, err = fmt.Fprintf(writer, "_id INTEGER NOT NULL CHECK (_id = 1),\n\t") - if err != nil { - return err - } - } else { - for _, field := range tm.typ.KeyFields { - err = tm.createColumnDefinition(writer, field) - if err != nil { - return err - } - } - } - - for _, field := range tm.typ.ValueFields { - err = tm.createColumnDefinition(writer, field) - if err != nil { - return err - } - } - - // add _deleted column when we have RetainDeletions set and enabled - if !tm.options.DisableRetainDeletions && tm.typ.RetainDeletions { - _, err = fmt.Fprintf(writer, "_deleted BOOLEAN NOT NULL DEFAULT FALSE,\n\t") - if err != nil { - return err - } - } - - var pKeys []string - if !isSingleton { - for _, field := range tm.typ.KeyFields { - name, err := tm.updatableColumnName(field) - if err != nil { - return err - } - - pKeys = append(pKeys, name) - } - } else { - pKeys = []string{"_id"} - } - - _, err = fmt.Fprintf(writer, "PRIMARY KEY (%s)", strings.Join(pKeys, ", ")) - if err != nil { - return err - } - - _, err = fmt.Fprintf(writer, "\n);\n") - if err != nil { - return err - } - - // we GRANT SELECT on the table to PUBLIC so that the table is automatically available - // for querying using off-the-shelf tools like pg_graphql, Postgrest, Postgraphile, etc. 
- // without any login permissions - _, err = fmt.Fprintf(writer, "GRANT SELECT ON TABLE %q TO PUBLIC;", tm.TableName()) - if err != nil { - return err - } - - return nil -} diff --git a/indexer/postgres/create_table_test.go b/indexer/postgres/create_table_test.go deleted file mode 100644 index dec09d7aed..0000000000 --- a/indexer/postgres/create_table_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package postgres - -import ( - "os" - - "cosmossdk.io/indexer/postgres/internal/testdata" - "cosmossdk.io/schema" -) - -func ExampleObjectIndexer_CreateTableSql_allKinds() { - exampleCreateTable(testdata.AllKindsObject) - // Output: - // CREATE TABLE IF NOT EXISTS "test_all_kinds" ( - // "id" BIGINT NOT NULL, - // "ts" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("ts_nanos")) STORED, - // "ts_nanos" BIGINT NOT NULL, - // "string" TEXT NOT NULL, - // "bytes" BYTEA NOT NULL, - // "int8" SMALLINT NOT NULL, - // "uint8" SMALLINT NOT NULL, - // "int16" SMALLINT NOT NULL, - // "uint16" INTEGER NOT NULL, - // "int32" INTEGER NOT NULL, - // "uint32" BIGINT NOT NULL, - // "int64" BIGINT NOT NULL, - // "uint64" NUMERIC NOT NULL, - // "integer" NUMERIC NOT NULL, - // "decimal" NUMERIC NOT NULL, - // "bool" BOOLEAN NOT NULL, - // "time" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("time_nanos")) STORED, - // "time_nanos" BIGINT NOT NULL, - // "duration" BIGINT NOT NULL, - // "float32" REAL NOT NULL, - // "float64" DOUBLE PRECISION NOT NULL, - // "bech32address" TEXT NOT NULL, - // "enum" "test_my_enum" NOT NULL, - // "json" JSONB NOT NULL, - // PRIMARY KEY ("id", "ts_nanos") - // ); - // GRANT SELECT ON TABLE "test_all_kinds" TO PUBLIC; -} - -func ExampleObjectIndexer_CreateTableSql_singleton() { - exampleCreateTable(testdata.SingletonObject) - // Output: - // CREATE TABLE IF NOT EXISTS "test_singleton" ( - // _id INTEGER NOT NULL CHECK (_id = 1), - // "foo" TEXT NOT NULL, - // "bar" INTEGER NULL, - // "an_enum" "test_my_enum" NOT NULL, - // PRIMARY KEY (_id) - // ); - // GRANT SELECT ON TABLE "test_singleton" TO PUBLIC; -} - -func ExampleObjectIndexer_CreateTableSql_vote() { - exampleCreateTable(testdata.VoteObject) - // Output: - // CREATE TABLE IF NOT EXISTS "test_vote" ( - // "proposal" BIGINT NOT NULL, - // "address" TEXT NOT NULL, - // "vote" "test_vote_type" NOT NULL, - // _deleted BOOLEAN NOT NULL DEFAULT FALSE, - // PRIMARY KEY ("proposal", "address") - // ); - // GRANT SELECT ON TABLE "test_vote" TO PUBLIC; -} - -func ExampleObjectIndexer_CreateTableSql_vote_no_retain_delete() { - exampleCreateTableOpt(testdata.VoteObject, true) - // Output: - // CREATE TABLE IF NOT EXISTS "test_vote" ( - // "proposal" BIGINT NOT NULL, - // "address" TEXT NOT NULL, - // "vote" "test_vote_type" NOT NULL, - // PRIMARY KEY ("proposal", "address") - // ); - // GRANT SELECT ON TABLE "test_vote" TO PUBLIC; -} - -func exampleCreateTable(objectType schema.ObjectType) { - exampleCreateTableOpt(objectType, false) -} - -func exampleCreateTableOpt(objectType schema.ObjectType, noRetainDelete bool) { - tm := NewObjectIndexer("test", objectType, Options{ - Logger: func(msg, sql string, params ...interface{}) {}, - DisableRetainDeletions: noRetainDelete, - }) - err := tm.CreateTableSql(os.Stdout) - if err != nil { - panic(err) - } -} diff --git a/indexer/postgres/enum.go b/indexer/postgres/enum.go deleted file mode 100644 index c438257d20..0000000000 --- a/indexer/postgres/enum.go +++ /dev/null @@ -1,92 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "fmt" - "io" - "strings" - - 
"cosmossdk.io/schema" -) - -// CreateEnumType creates an enum type in the database. -func (m *ModuleIndexer) CreateEnumType(ctx context.Context, conn DBConn, enum schema.EnumDefinition) error { - typeName := enumTypeName(m.moduleName, enum) - row := conn.QueryRowContext(ctx, "SELECT 1 FROM pg_type WHERE typname = $1", typeName) - var res interface{} - if err := row.Scan(&res); err != nil { - if err != sql.ErrNoRows { - return fmt.Errorf("failed to check if enum type %q exists: %v", typeName, err) //nolint:errorlint // using %v for go 1.12 compat - } - } else { - // the enum type already exists - return nil - } - - buf := new(strings.Builder) - err := CreateEnumTypeSql(buf, m.moduleName, enum) - if err != nil { - return err - } - - sqlStr := buf.String() - if m.options.Logger != nil { - m.options.Logger("Creating enum type", sqlStr) - } - _, err = conn.ExecContext(ctx, sqlStr) - return err -} - -// CreateEnumTypeSql generates a CREATE TYPE statement for the enum definition. -func CreateEnumTypeSql(writer io.Writer, moduleName string, enum schema.EnumDefinition) error { - _, err := fmt.Fprintf(writer, "CREATE TYPE %q AS ENUM (", enumTypeName(moduleName, enum)) - if err != nil { - return err - } - - for i, value := range enum.Values { - if i > 0 { - _, err = fmt.Fprintf(writer, ", ") - if err != nil { - return err - } - } - _, err = fmt.Fprintf(writer, "'%s'", value) - if err != nil { - return err - } - } - - _, err = fmt.Fprintf(writer, ");") - return err -} - -// enumTypeName returns the name of the enum type scoped to the module. -func enumTypeName(moduleName string, enum schema.EnumDefinition) string { - return fmt.Sprintf("%s_%s", moduleName, enum.Name) -} - -// createEnumTypesForFields creates enum types for all the fields that have enum kind in the module schema. -func (m *ModuleIndexer) createEnumTypesForFields(ctx context.Context, conn DBConn, fields []schema.Field) error { - for _, field := range fields { - if field.Kind != schema.EnumKind { - continue - } - - if _, ok := m.definedEnums[field.EnumDefinition.Name]; ok { - // if the enum type is already defined, skip - // we assume validation already happened - continue - } - - err := m.CreateEnumType(ctx, conn, field.EnumDefinition) - if err != nil { - return err - } - - m.definedEnums[field.EnumDefinition.Name] = field.EnumDefinition - } - - return nil -} diff --git a/indexer/postgres/enum_test.go b/indexer/postgres/enum_test.go deleted file mode 100644 index 22d8870171..0000000000 --- a/indexer/postgres/enum_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package postgres - -import ( - "os" - - "cosmossdk.io/indexer/postgres/internal/testdata" -) - -func ExampleCreateEnumTypeSql() { - err := CreateEnumTypeSql(os.Stdout, "test", testdata.MyEnum) - if err != nil { - panic(err) - } - // Output: - // CREATE TYPE "test_my_enum" AS ENUM ('a', 'b', 'c'); -} diff --git a/indexer/postgres/go.mod b/indexer/postgres/go.mod deleted file mode 100644 index d85dbc4671..0000000000 --- a/indexer/postgres/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module cosmossdk.io/indexer/postgres - -// NOTE: we are staying on an earlier version of golang to avoid problems building -// with older codebases. -go 1.12 - -// NOTE: cosmossdk.io/schema should be the only dependency here -// so there are no problems building this with any version of the SDK. -// This module should only use the golang standard library (database/sql) -// and cosmossdk.io/indexer/base. 
-require cosmossdk.io/schema v0.1.1 diff --git a/indexer/postgres/go.sum b/indexer/postgres/go.sum deleted file mode 100644 index 6a92c3d3ec..0000000000 --- a/indexer/postgres/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= -cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= diff --git a/indexer/postgres/indexer.go b/indexer/postgres/indexer.go deleted file mode 100644 index afcd8e0d8d..0000000000 --- a/indexer/postgres/indexer.go +++ /dev/null @@ -1,80 +0,0 @@ -package postgres - -import ( - "context" - "database/sql" - "fmt" - - "cosmossdk.io/schema/appdata" -) - -type Config struct { - // DatabaseURL is the PostgreSQL connection URL to use to connect to the database. - DatabaseURL string `json:"database_url"` - - // DatabaseDriver is the PostgreSQL database/sql driver to use. This defaults to "pgx". - DatabaseDriver string `json:"database_driver"` - - // DisableRetainDeletions disables the retain deletions functionality even if it is set in an object type schema. - DisableRetainDeletions bool `json:"disable_retain_deletions"` -} - -type SqlLogger = func(msg, sql string, params ...interface{}) - -func StartIndexer(ctx context.Context, logger SqlLogger, config Config) (appdata.Listener, error) { - if config.DatabaseURL == "" { - return appdata.Listener{}, fmt.Errorf("missing database URL") - } - - driver := config.DatabaseDriver - if driver == "" { - driver = "pgx" - } - - db, err := sql.Open(driver, config.DatabaseURL) - if err != nil { - return appdata.Listener{}, err - } - - tx, err := db.BeginTx(ctx, nil) - if err != nil { - return appdata.Listener{}, err - } - - // commit base schema - _, err = tx.Exec(BaseSQL) - if err != nil { - return appdata.Listener{}, err - } - - moduleIndexers := map[string]*ModuleIndexer{} - opts := Options{ - DisableRetainDeletions: config.DisableRetainDeletions, - Logger: logger, - } - - return appdata.Listener{ - InitializeModuleData: func(data appdata.ModuleInitializationData) error { - moduleName := data.ModuleName - modSchema := data.Schema - _, ok := moduleIndexers[moduleName] - if ok { - return fmt.Errorf("module %s already initialized", moduleName) - } - - mm := NewModuleIndexer(moduleName, modSchema, opts) - moduleIndexers[moduleName] = mm - - return mm.InitializeSchema(ctx, tx) - }, - Commit: func(data appdata.CommitData) error { - err = tx.Commit() - if err != nil { - return err - } - - tx, err = db.BeginTx(ctx, nil) - return err - }, - }, nil -} diff --git a/indexer/postgres/internal/testdata/example_schema.go b/indexer/postgres/internal/testdata/example_schema.go deleted file mode 100644 index ccdd39d96c..0000000000 --- a/indexer/postgres/internal/testdata/example_schema.go +++ /dev/null @@ -1,98 +0,0 @@ -package testdata - -import "cosmossdk.io/schema" - -var ExampleSchema schema.ModuleSchema - -var AllKindsObject schema.ObjectType - -func init() { - AllKindsObject = schema.ObjectType{ - Name: "all_kinds", - KeyFields: []schema.Field{ - { - Name: "id", - Kind: schema.Int64Kind, - }, - { - Name: "ts", - Kind: schema.TimeKind, - }, - }, - } - - for i := schema.InvalidKind + 1; i <= schema.MAX_VALID_KIND; i++ { - field := schema.Field{ - Name: i.String(), - Kind: i, - } - - switch i { - case schema.EnumKind: - field.EnumDefinition = MyEnum - case schema.Bech32AddressKind: - field.AddressPrefix = "foo" - default: - } - - AllKindsObject.ValueFields = append(AllKindsObject.ValueFields, field) - } - - ExampleSchema = schema.ModuleSchema{ - ObjectTypes: 
[]schema.ObjectType{ - AllKindsObject, - SingletonObject, - VoteObject, - }, - } -} - -var SingletonObject = schema.ObjectType{ - Name: "singleton", - ValueFields: []schema.Field{ - { - Name: "foo", - Kind: schema.StringKind, - }, - { - Name: "bar", - Kind: schema.Int32Kind, - Nullable: true, - }, - { - Name: "an_enum", - Kind: schema.EnumKind, - EnumDefinition: MyEnum, - }, - }, -} - -var VoteObject = schema.ObjectType{ - Name: "vote", - KeyFields: []schema.Field{ - { - Name: "proposal", - Kind: schema.Int64Kind, - }, - { - Name: "address", - Kind: schema.Bech32AddressKind, - }, - }, - ValueFields: []schema.Field{ - { - Name: "vote", - Kind: schema.EnumKind, - EnumDefinition: schema.EnumDefinition{ - Name: "vote_type", - Values: []string{"yes", "no", "abstain"}, - }, - }, - }, - RetainDeletions: true, -} - -var MyEnum = schema.EnumDefinition{ - Name: "my_enum", - Values: []string{"a", "b", "c"}, -} diff --git a/indexer/postgres/module.go b/indexer/postgres/module.go deleted file mode 100644 index 57564700b7..0000000000 --- a/indexer/postgres/module.go +++ /dev/null @@ -1,61 +0,0 @@ -package postgres - -import ( - "context" - "fmt" - - "cosmossdk.io/schema" -) - -// ModuleIndexer manages the tables for a module. -type ModuleIndexer struct { - moduleName string - schema schema.ModuleSchema - tables map[string]*ObjectIndexer - definedEnums map[string]schema.EnumDefinition - options Options -} - -// NewModuleIndexer creates a new ModuleIndexer for the given module schema. -func NewModuleIndexer(moduleName string, modSchema schema.ModuleSchema, options Options) *ModuleIndexer { - return &ModuleIndexer{ - moduleName: moduleName, - schema: modSchema, - tables: map[string]*ObjectIndexer{}, - definedEnums: map[string]schema.EnumDefinition{}, - options: options, - } -} - -// InitializeSchema creates tables for all object types in the module schema and creates enum types. -func (m *ModuleIndexer) InitializeSchema(ctx context.Context, conn DBConn) error { - // create enum types - for _, typ := range m.schema.ObjectTypes { - err := m.createEnumTypesForFields(ctx, conn, typ.KeyFields) - if err != nil { - return err - } - - err = m.createEnumTypesForFields(ctx, conn, typ.ValueFields) - if err != nil { - return err - } - } - - // create tables for all object types - for _, typ := range m.schema.ObjectTypes { - tm := NewObjectIndexer(m.moduleName, typ, m.options) - m.tables[typ.Name] = tm - err := tm.CreateTable(ctx, conn) - if err != nil { - return fmt.Errorf("failed to create table for %s in module %s: %v", typ.Name, m.moduleName, err) //nolint:errorlint // using %v for go 1.12 compat - } - } - - return nil -} - -// ObjectIndexers returns the object indexers for the module. -func (m *ModuleIndexer) ObjectIndexers() map[string]*ObjectIndexer { - return m.tables -} diff --git a/indexer/postgres/object.go b/indexer/postgres/object.go deleted file mode 100644 index 78bbfdf636..0000000000 --- a/indexer/postgres/object.go +++ /dev/null @@ -1,44 +0,0 @@ -package postgres - -import ( - "fmt" - - "cosmossdk.io/schema" -) - -// ObjectIndexer is a helper struct that generates SQL for a given object type. -type ObjectIndexer struct { - moduleName string - typ schema.ObjectType - valueFields map[string]schema.Field - allFields map[string]schema.Field - options Options -} - -// NewObjectIndexer creates a new ObjectIndexer for the given object type. 
-func NewObjectIndexer(moduleName string, typ schema.ObjectType, options Options) *ObjectIndexer { - allFields := make(map[string]schema.Field) - valueFields := make(map[string]schema.Field) - - for _, field := range typ.KeyFields { - allFields[field.Name] = field - } - - for _, field := range typ.ValueFields { - valueFields[field.Name] = field - allFields[field.Name] = field - } - - return &ObjectIndexer{ - moduleName: moduleName, - typ: typ, - allFields: allFields, - valueFields: valueFields, - options: options, - } -} - -// TableName returns the name of the table for the object type scoped to its module. -func (tm *ObjectIndexer) TableName() string { - return fmt.Sprintf("%s_%s", tm.moduleName, tm.typ.Name) -} diff --git a/indexer/postgres/options.go b/indexer/postgres/options.go deleted file mode 100644 index be93d43b6c..0000000000 --- a/indexer/postgres/options.go +++ /dev/null @@ -1,10 +0,0 @@ -package postgres - -// Options are the options for module and object indexers. -type Options struct { - // DisableRetainDeletions disables retain deletions functionality even on object types that have it set. - DisableRetainDeletions bool - - // Logger is the logger for the indexer to use. - Logger SqlLogger -} diff --git a/indexer/postgres/sonar-project.properties b/indexer/postgres/sonar-project.properties deleted file mode 100644 index 6d7366413a..0000000000 --- a/indexer/postgres/sonar-project.properties +++ /dev/null @@ -1,16 +0,0 @@ -sonar.projectKey=cosmos-sdk-indexer-postgres -sonar.organization=cosmos - -sonar.projectName=Cosmos SDK - Postgres Indexer -sonar.project.monorepo.enabled=true - -sonar.sources=. -sonar.exclusions=**/*_test.go,**/*.pb.go,**/*.pulsar.go,**/*.pb.gw.go -sonar.coverage.exclusions=**/*_test.go,**/testutil/**,**/*.pb.go,**/*.pb.gw.go,**/*.pulsar.go,test_helpers.go,docs/** -sonar.tests=. -sonar.test.inclusions=**/*_test.go -sonar.go.coverage.reportPaths=coverage.out - -sonar.sourceEncoding=UTF-8 -sonar.scm.provider=git -sonar.scm.forceReloadAll=true diff --git a/indexer/postgres/tests/README.md b/indexer/postgres/tests/README.md deleted file mode 100644 index a57c861711..0000000000 --- a/indexer/postgres/tests/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# PostgreSQL Indexer Tests - -The majority of tests for the PostgreSQL indexer are stored in this separate `tests` go module to keep the main indexer module free of dependencies on any particular PostgreSQL driver. This allows users to choose their own driver and integrate the indexer free of any dependency conflict concerns. 
\ No newline at end of file diff --git a/indexer/postgres/tests/go.mod b/indexer/postgres/tests/go.mod deleted file mode 100644 index d5a2930425..0000000000 --- a/indexer/postgres/tests/go.mod +++ /dev/null @@ -1,33 +0,0 @@ -module cosmossdk.io/indexer/postgres/testing - -require ( - cosmossdk.io/indexer/postgres v0.0.0-00010101000000-000000000000 - cosmossdk.io/schema v0.1.1 - github.com/fergusstrange/embedded-postgres v1.27.0 - github.com/hashicorp/consul/sdk v0.16.1 - github.com/jackc/pgx/v5 v5.6.0 - github.com/stretchr/testify v1.9.0 - gotest.tools/v3 v3.5.1 -) - -require ( - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/puddle/v2 v2.2.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/lib/pq v1.10.4 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace cosmossdk.io/indexer/postgres => ../. - -go 1.22 diff --git a/indexer/postgres/tests/go.sum b/indexer/postgres/tests/go.sum deleted file mode 100644 index a4ba87b486..0000000000 --- a/indexer/postgres/tests/go.sum +++ /dev/null @@ -1,56 +0,0 @@ -cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= -cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fergusstrange/embedded-postgres v1.27.0 h1:RAlpWL194IhEpPgeJceTM0ifMJKhiSVxBVIDYB1Jee8= -github.com/fergusstrange/embedded-postgres v1.27.0/go.mod h1:t/MLs0h9ukYM6FSt99R7InCHs1nW0ordoVCcnzmpTYw= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= -github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= -github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/indexer/postgres/tests/init_schema_test.go b/indexer/postgres/tests/init_schema_test.go deleted file mode 100644 index 8c4288ba34..0000000000 --- a/indexer/postgres/tests/init_schema_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package tests - -import ( - "context" - "fmt" - "os" - "strings" - "testing" - - embeddedpostgres "github.com/fergusstrange/embedded-postgres" - 
"github.com/hashicorp/consul/sdk/freeport" - _ "github.com/jackc/pgx/v5/stdlib" // this is where we get our pgx database driver from - "github.com/stretchr/testify/require" - "gotest.tools/v3/golden" - - "cosmossdk.io/indexer/postgres" - "cosmossdk.io/indexer/postgres/internal/testdata" - "cosmossdk.io/schema/appdata" -) - -func TestInitSchema(t *testing.T) { - t.Run("default", func(t *testing.T) { - testInitSchema(t, false, "init_schema.txt") - }) - - t.Run("retain deletions disabled", func(t *testing.T) { - testInitSchema(t, true, "init_schema_no_retain_delete.txt") - }) -} - -func testInitSchema(t *testing.T, disableRetainDeletions bool, goldenFileName string) { - t.Helper() - connectionUrl := createTestDB(t) - - buf := &strings.Builder{} - logger := func(msg, sql string, params ...interface{}) { - _, err := fmt.Fprintln(buf, msg) - require.NoError(t, err) - _, err = fmt.Fprintln(buf, sql) - require.NoError(t, err) - if len(params) != 0 { - _, err = fmt.Fprintln(buf, "Params:", params) - require.NoError(t, err) - } - _, err = fmt.Fprintln(buf) - require.NoError(t, err) - } - listener, err := postgres.StartIndexer(context.Background(), logger, postgres.Config{ - DatabaseURL: connectionUrl, - DisableRetainDeletions: disableRetainDeletions, - }) - require.NoError(t, err) - - require.NotNil(t, listener.InitializeModuleData) - require.NoError(t, listener.InitializeModuleData(appdata.ModuleInitializationData{ - ModuleName: "test", - Schema: testdata.ExampleSchema, - })) - - require.NotNil(t, listener.Commit) - require.NoError(t, listener.Commit(appdata.CommitData{})) - - golden.Assert(t, buf.String(), goldenFileName) -} - -func createTestDB(t *testing.T) (connectionUrl string) { - t.Helper() - tempDir, err := os.MkdirTemp("", "postgres-indexer-test") - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, os.RemoveAll(tempDir)) - }) - - dbPort := freeport.GetOne(t) - pgConfig := embeddedpostgres.DefaultConfig(). - Port(uint32(dbPort)). 
- DataPath(tempDir) - - connectionUrl = pgConfig.GetConnectionURL() - pg := embeddedpostgres.NewDatabase(pgConfig) - require.NoError(t, pg.Start()) - t.Cleanup(func() { - require.NoError(t, pg.Stop()) - }) - - return -} diff --git a/indexer/postgres/tests/testdata/init_schema.txt b/indexer/postgres/tests/testdata/init_schema.txt deleted file mode 100644 index e2a0a1730e..0000000000 --- a/indexer/postgres/tests/testdata/init_schema.txt +++ /dev/null @@ -1,56 +0,0 @@ -Creating enum type -CREATE TYPE "test_my_enum" AS ENUM ('a', 'b', 'c'); - -Creating enum type -CREATE TYPE "test_vote_type" AS ENUM ('yes', 'no', 'abstain'); - -Creating table test_all_kinds -CREATE TABLE IF NOT EXISTS "test_all_kinds" ( - "id" BIGINT NOT NULL, - "ts" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("ts_nanos")) STORED, - "ts_nanos" BIGINT NOT NULL, - "string" TEXT NOT NULL, - "bytes" BYTEA NOT NULL, - "int8" SMALLINT NOT NULL, - "uint8" SMALLINT NOT NULL, - "int16" SMALLINT NOT NULL, - "uint16" INTEGER NOT NULL, - "int32" INTEGER NOT NULL, - "uint32" BIGINT NOT NULL, - "int64" BIGINT NOT NULL, - "uint64" NUMERIC NOT NULL, - "integer" NUMERIC NOT NULL, - "decimal" NUMERIC NOT NULL, - "bool" BOOLEAN NOT NULL, - "time" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("time_nanos")) STORED, - "time_nanos" BIGINT NOT NULL, - "duration" BIGINT NOT NULL, - "float32" REAL NOT NULL, - "float64" DOUBLE PRECISION NOT NULL, - "bech32address" TEXT NOT NULL, - "enum" "test_my_enum" NOT NULL, - "json" JSONB NOT NULL, - PRIMARY KEY ("id", "ts_nanos") -); -GRANT SELECT ON TABLE "test_all_kinds" TO PUBLIC; - -Creating table test_singleton -CREATE TABLE IF NOT EXISTS "test_singleton" ( - _id INTEGER NOT NULL CHECK (_id = 1), - "foo" TEXT NOT NULL, - "bar" INTEGER NULL, - "an_enum" "test_my_enum" NOT NULL, - PRIMARY KEY (_id) -); -GRANT SELECT ON TABLE "test_singleton" TO PUBLIC; - -Creating table test_vote -CREATE TABLE IF NOT EXISTS "test_vote" ( - "proposal" BIGINT NOT NULL, - "address" TEXT NOT NULL, - "vote" "test_vote_type" NOT NULL, - _deleted BOOLEAN NOT NULL DEFAULT FALSE, - PRIMARY KEY ("proposal", "address") -); -GRANT SELECT ON TABLE "test_vote" TO PUBLIC; - diff --git a/indexer/postgres/tests/testdata/init_schema_no_retain_delete.txt b/indexer/postgres/tests/testdata/init_schema_no_retain_delete.txt deleted file mode 100644 index 0d8cdad2cd..0000000000 --- a/indexer/postgres/tests/testdata/init_schema_no_retain_delete.txt +++ /dev/null @@ -1,55 +0,0 @@ -Creating enum type -CREATE TYPE "test_my_enum" AS ENUM ('a', 'b', 'c'); - -Creating enum type -CREATE TYPE "test_vote_type" AS ENUM ('yes', 'no', 'abstain'); - -Creating table test_all_kinds -CREATE TABLE IF NOT EXISTS "test_all_kinds" ( - "id" BIGINT NOT NULL, - "ts" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("ts_nanos")) STORED, - "ts_nanos" BIGINT NOT NULL, - "string" TEXT NOT NULL, - "bytes" BYTEA NOT NULL, - "int8" SMALLINT NOT NULL, - "uint8" SMALLINT NOT NULL, - "int16" SMALLINT NOT NULL, - "uint16" INTEGER NOT NULL, - "int32" INTEGER NOT NULL, - "uint32" BIGINT NOT NULL, - "int64" BIGINT NOT NULL, - "uint64" NUMERIC NOT NULL, - "integer" NUMERIC NOT NULL, - "decimal" NUMERIC NOT NULL, - "bool" BOOLEAN NOT NULL, - "time" TIMESTAMPTZ GENERATED ALWAYS AS (nanos_to_timestamptz("time_nanos")) STORED, - "time_nanos" BIGINT NOT NULL, - "duration" BIGINT NOT NULL, - "float32" REAL NOT NULL, - "float64" DOUBLE PRECISION NOT NULL, - "bech32address" TEXT NOT NULL, - "enum" "test_my_enum" NOT NULL, - "json" JSONB NOT NULL, - PRIMARY KEY ("id", 
"ts_nanos") -); -GRANT SELECT ON TABLE "test_all_kinds" TO PUBLIC; - -Creating table test_singleton -CREATE TABLE IF NOT EXISTS "test_singleton" ( - _id INTEGER NOT NULL CHECK (_id = 1), - "foo" TEXT NOT NULL, - "bar" INTEGER NULL, - "an_enum" "test_my_enum" NOT NULL, - PRIMARY KEY (_id) -); -GRANT SELECT ON TABLE "test_singleton" TO PUBLIC; - -Creating table test_vote -CREATE TABLE IF NOT EXISTS "test_vote" ( - "proposal" BIGINT NOT NULL, - "address" TEXT NOT NULL, - "vote" "test_vote_type" NOT NULL, - PRIMARY KEY ("proposal", "address") -); -GRANT SELECT ON TABLE "test_vote" TO PUBLIC; - diff --git a/runtime/v2/builder.go b/runtime/v2/builder.go index 82e418349d..b7869fa3f0 100644 --- a/runtime/v2/builder.go +++ b/runtime/v2/builder.go @@ -5,6 +5,9 @@ import ( "encoding/json" "fmt" "io" + "path/filepath" + + "github.com/spf13/viper" "cosmossdk.io/core/appmodule" appmodulev2 "cosmossdk.io/core/appmodule/v2" @@ -13,6 +16,7 @@ import ( "cosmossdk.io/server/v2/appmanager" "cosmossdk.io/server/v2/stf" "cosmossdk.io/server/v2/stf/branch" + "cosmossdk.io/store/v2/db" rootstore "cosmossdk.io/store/v2/root" ) @@ -22,6 +26,7 @@ import ( type AppBuilder[T transaction.Tx] struct { app *App[T] storeOptions *rootstore.FactoryOptions + viper *viper.Viper // the following fields are used to overwrite the default branch func(state store.ReaderMap) store.WriterMap @@ -119,6 +124,30 @@ func (a *AppBuilder[T]) Build(opts ...AppBuilderOption[T]) (*App[T], error) { } a.app.stf = stf + v := a.viper + home := v.GetString(FlagHome) + + storeOpts := rootstore.DefaultStoreOptions() + if s := v.Sub("store.options"); s != nil { + if err := s.Unmarshal(&storeOpts); err != nil { + return nil, fmt.Errorf("failed to store options: %w", err) + } + } + + scRawDb, err := db.NewDB(db.DBType(v.GetString("store.app-db-backend")), "application", filepath.Join(home, "data"), nil) + if err != nil { + panic(err) + } + + storeOptions := &rootstore.FactoryOptions{ + Logger: a.app.logger, + RootDir: home, + Options: storeOpts, + StoreKeys: append(a.app.storeKeys, "stf"), + SCRawDB: scRawDb, + } + a.storeOptions = storeOptions + rs, err := rootstore.CreateRootStore(a.storeOptions) if err != nil { return nil, fmt.Errorf("failed to create root store: %w", err) diff --git a/runtime/v2/go.mod b/runtime/v2/go.mod index fb0c2fb841..3c7b6bab3c 100644 --- a/runtime/v2/go.mod +++ b/runtime/v2/go.mod @@ -7,9 +7,6 @@ replace ( cosmossdk.io/api => ../../api cosmossdk.io/core => ../../core cosmossdk.io/core/testing => ../../core/testing - cosmossdk.io/server/v2/appmanager => ../../server/v2/appmanager - cosmossdk.io/server/v2/stf => ../../server/v2/stf - cosmossdk.io/store/v2 => ../../store/v2 cosmossdk.io/x/accounts => ../../x/accounts cosmossdk.io/x/auth => ../../x/auth cosmossdk.io/x/bank => ../../x/bank @@ -22,16 +19,17 @@ replace ( require ( cosmossdk.io/api v0.7.5 - cosmossdk.io/core v0.12.1-0.20240726110027-5c90246b3f9f + cosmossdk.io/core v0.12.1-0.20231114100755-569e3ff6a0d7 cosmossdk.io/depinject v1.0.0 cosmossdk.io/log v1.3.1 - cosmossdk.io/server/v2/appmanager v0.0.0-20240726110027-5c90246b3f9f - cosmossdk.io/server/v2/stf v0.0.0-20240726110027-5c90246b3f9f - cosmossdk.io/store/v2 v2.0.0-20240726110027-5c90246b3f9f + cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 // main + cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6 // main + cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 // main cosmossdk.io/x/tx v0.13.3 github.com/cosmos/gogoproto v1.5.0 - golang.org/x/exp 
v0.0.0-20240719175910-8a7402abbf56 - google.golang.org/grpc v1.65.0 + github.com/spf13/viper v1.19.0 + golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc + google.golang.org/grpc v1.64.1 google.golang.org/protobuf v1.34.2 ) @@ -39,14 +37,13 @@ require ( buf.build/gen/go/cometbft/cometbft/protocolbuffers/go v1.34.2-20240701160653-fedbb9acfd2f.2 // indirect buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.34.2-20240130113600-88ef6483f90f.2 // indirect cosmossdk.io/core/testing v0.0.0-00010101000000-000000000000 // indirect - cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 // indirect github.com/DataDog/zstd v1.5.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.11.3 // indirect - github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect + github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.1 // indirect + github.com/cockroachdb/pebble v1.1.0 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cosmos/cosmos-db v1.0.2 // indirect @@ -55,7 +52,8 @@ require ( github.com/cosmos/ics23/go v0.10.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/dot v1.6.2 // indirect - github.com/getsentry/sentry-go v0.28.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.27.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect @@ -63,15 +61,19 @@ require ( github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.9.2 // indirect + github.com/linxGnu/grocksdb v1.8.14 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/gomega v1.34.0 // indirect + github.com/onsi/gomega v1.28.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect @@ -80,18 +82,26 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/zerolog v1.33.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.9.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect 
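For context on the `runtime/v2/builder.go` change above: a minimal, self-contained sketch (not the SDK's code) of how viper resolves the `store.options` section that `Build` now unmarshals. The TOML key shown is an illustrative placeholder; the real option names come from `rootstore.DefaultStoreOptions()`.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("toml")
	// Hypothetical app.toml fragment; "example-option" is a placeholder key.
	cfg := []byte("[store.options]\nexample-option = \"default\"\n")
	if err := v.ReadConfig(bytes.NewReader(cfg)); err != nil {
		panic(err)
	}
	// Sub returns nil when the section is absent, which is why builder.go
	// guards the Unmarshal call behind a nil check.
	opts := map[string]any{}
	if s := v.Sub("store.options"); s != nil {
		if err := s.Unmarshal(&opts); err != nil {
			panic(fmt.Errorf("failed to unmarshal store options: %w", err))
		}
	}
	fmt.Println(opts) // map[example-option:default]
}
```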
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect github.com/tidwall/btree v1.7.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.25.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/runtime/v2/go.sum b/runtime/v2/go.sum index 1a705f4162..fd2ee86987 100644 --- a/runtime/v2/go.sum +++ b/runtime/v2/go.sum @@ -4,10 +4,16 @@ buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.34.2-20240130113600-88e buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.34.2-20240130113600-88ef6483f90f.2/go.mod h1:HqcXMSa5qnNuakaMUo+hWhF51mKbcrZxGl9Vp5EeJXc= cosmossdk.io/depinject v1.0.0 h1:dQaTu6+O6askNXO06+jyeUAnF2/ssKwrrszP9t5q050= cosmossdk.io/depinject v1.0.0/go.mod h1:zxK/h3HgHoA/eJVtiSsoaRaRA2D5U4cJ5thIG4ssbB8= -cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= -cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 h1:IQNdY2kB+k+1OM2DvqFG1+UgeU1JzZrWtwuWzI3ZfwA= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5/go.mod h1:0CuYKkFHxc1vw2JC+t21THBCALJVROrWVR/3PQ1urpc= cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 h1:vrHmVjfEjEwQh90dim272gYq7OFILg4Yrv3XzreMpe4= +cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6/go.mod h1:Xm5IOSjw45Sew7fiVckaTCIU5oQPs20V+54NOqR3H4o= +cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6 h1:F8yfqCf1cAwuZZnIxinmzr/2nmLjhK9K/BJfBjW3nJ0= +cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6/go.mod h1:IUbZp79IZ4NCR2eNXA0utcQOS8lz34BvsAWTeCGwGAM= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 h1:/ffIfMKzoCVUI38t5Vq3BNW9U8exRMxK5QgS/ujn0lA= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6/go.mod h1:aG3brMLcldPsdhfkdCaisGDIe+tXTNWdUDt5JYsRDl8= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -29,14 +35,12 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= -github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8= 
-github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4= -github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.1 h1:XnKU22oiCLy2Xn8vp1re67cXg4SAasg/WDt1NtcRFaw= -github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= +github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= @@ -66,8 +70,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k= -github.com/getsentry/sentry-go v0.28.1/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg= +github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= +github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -125,6 +129,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -132,8 +138,8 @@ github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 
h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -143,8 +149,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.9.2 h1:O3mzvO0wuzQ9mtlHbDrShixyVjVbmuqTjFrzlf43wZ8= -github.com/linxGnu/grocksdb v1.9.2/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= +github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= +github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -154,6 +162,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -162,9 +172,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -175,10 +184,12 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= +github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -214,19 +225,39 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= @@ -236,14 +267,16 @@ github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EU github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc h1:O9NuF4s+E/PvMIy+9IUZB9znFwUIXEWSstNjek6VpVg= +golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -312,12 +345,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f h1:RARaIm8pxYuxyNPbBQf5igT7XdOyCNtat1qAT2ZxjU4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -334,6 +367,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -342,6 +377,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= diff --git a/runtime/v2/manager.go b/runtime/v2/manager.go 
index 2b134bdb99..244353674c 100644 --- a/runtime/v2/manager.go +++ b/runtime/v2/manager.go @@ -166,7 +166,7 @@ func (m *MM[T]) InitGenesisJSON( case appmodulev2.HasGenesis: m.logger.Debug("running initialization for module", "module", moduleName) if err := module.InitGenesis(ctx, genesisData[moduleName]); err != nil { - return err + return fmt.Errorf("init module %s: %w", moduleName, err) } case appmodulev2.HasABCIGenesis: m.logger.Debug("running initialization for module", "module", moduleName) @@ -410,7 +410,7 @@ func (m *MM[T]) RunMigrations(ctx context.Context, fromVM appmodulev2.VersionMap // The module manager assumes only one module will update the validator set, and it can't be a new module. if len(moduleValUpdates) > 0 { - return nil, fmt.Errorf("validator InitGenesis update is already set by another module") + return nil, errors.New("validator InitGenesis update is already set by another module") } } } diff --git a/runtime/v2/module.go b/runtime/v2/module.go index faa07a2b99..fbcf391b1f 100644 --- a/runtime/v2/module.go +++ b/runtime/v2/module.go @@ -6,6 +6,7 @@ import ( "slices" "github.com/cosmos/gogoproto/proto" + "github.com/spf13/viper" "google.golang.org/grpc" "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoregistry" @@ -28,7 +29,6 @@ import ( "cosmossdk.io/log" "cosmossdk.io/runtime/v2/services" "cosmossdk.io/server/v2/stf" - rootstorev2 "cosmossdk.io/store/v2/root" ) var ( @@ -151,7 +151,7 @@ type AppInputs struct { InterfaceRegistrar registry.InterfaceRegistrar LegacyAmino legacy.Amino Logger log.Logger - StoreOptions *rootstorev2.FactoryOptions `optional:"true"` + Viper *viper.Viper `optional:"true"` } func SetupAppBuilder(inputs AppInputs) { @@ -162,10 +162,8 @@ func SetupAppBuilder(inputs AppInputs) { app.moduleManager.RegisterInterfaces(inputs.InterfaceRegistrar) app.moduleManager.RegisterLegacyAminoCodec(inputs.LegacyAmino) - if inputs.StoreOptions != nil { - inputs.AppBuilder.storeOptions = inputs.StoreOptions - inputs.AppBuilder.storeOptions.StoreKeys = inputs.AppBuilder.app.storeKeys - inputs.AppBuilder.storeOptions.StoreKeys = append(inputs.AppBuilder.storeOptions.StoreKeys, "stf") + if inputs.Viper != nil { + inputs.AppBuilder.viper = inputs.Viper } } diff --git a/runtime/v2/types.go b/runtime/v2/types.go index ac606cbd54..0138dd802c 100644 --- a/runtime/v2/types.go +++ b/runtime/v2/types.go @@ -12,7 +12,10 @@ import ( "cosmossdk.io/x/tx/signing" ) -const ModuleName = "runtime" +const ( + ModuleName = "runtime" + FlagHome = "home" +) // ValidateProtoAnnotations validates that the proto annotations are correct. // More specifically, it verifies: diff --git a/schema/CHANGELOG.md b/schema/CHANGELOG.md deleted file mode 100644 index 0c3c9d0385..0000000000 --- a/schema/CHANGELOG.md +++ /dev/null @@ -1,37 +0,0 @@ - - -# Changelog - -## [Unreleased] diff --git a/schema/README.md b/schema/README.md deleted file mode 100644 index d19d54c9e8..0000000000 --- a/schema/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Logical State Schema Framework - -The `cosmossdk.io/schema` base module is designed to provide a stable, **zero-dependency** base layer for specifying the **logical representation of module state schemas** and implementing **state indexing**. This is intended to be used primarily for indexing modules in external databases and providing a standard human-readable state representation for genesis import and export. 
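An editorial illustration of the deleted README's point about logical state schemas: a minimal `schema.ModuleSchema` sketch using only types that appear in the decoding tests further below (object and field names are illustrative).

```go
package example

import "cosmossdk.io/schema"

// One "balances" object type keyed by (account, denom) with a single
// uint64 value column -- a direct mapping to a relational table.
var exampleSchema = schema.ModuleSchema{
	ObjectTypes: []schema.ObjectType{
		{
			Name: "balances",
			KeyFields: []schema.Field{
				{Name: "account", Kind: schema.StringKind},
				{Name: "denom", Kind: schema.StringKind},
			},
			ValueFields: []schema.Field{
				{Name: "amount", Kind: schema.Uint64Kind},
			},
		},
	},
}
```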
- -The schema defined in this library does not aim to be general purpose and cover all types of schemas, such as those used for defining transactions. For instance, this schema does not include many types of composite objects such as nested objects and arrays. Rather, the schema defined here aims to cover _state_ schemas only which are implemented as key-value pairs and usually have direct mappings to relational database tables or objects in a document store. - -Also, this schema does not cover physical state layout and byte-level encoding, but simply describes a common logical format. - -## `HasModuleCodec` Interface - -Any module which supports logical decoding and/or encoding should implement the `HasModuleCodec` interface. This interface provides a way to get the codec for the module, which can be used to decode the module's state and/or apply logical updates. - -State frameworks such as `collections` or `orm` should directly provide `ModuleCodec` implementations so that this functionality basically comes for free if a compatible framework is used. Modules that do not use one of these frameworks can choose to manually implement logical decoding and/or encoding. diff --git a/schema/appdata/README.md b/schema/appdata/README.md deleted file mode 100644 index f6a3e67663..0000000000 --- a/schema/appdata/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# App Data - -The `appdata` package defines the basic types for streaming blockchain event and state data to external listeners, with a specific focus on supporting logical decoding and indexing of state. - -A blockchain data source should accept a `Listener` instance and invoke the provided callbacks in the correct order. A downstream listener should provide a `Listener` instance and perform operations based on the data passed to its callbacks. - -## `Listener` Callback Order - -`Listener` callbacks should be called in this order - -```mermaid -sequenceDiagram - actor Source - actor Target - Source ->> Target: Initialize - Source -->> Target: InitializeModuleSchema - loop Block - Source ->> Target: StartBlock - Source ->> Target: OnBlockHeader - Source -->> Target: OnTx - Source -->> Target: OnEvent - Source -->> Target: OnKVPair - Source -->> Target: OnObjectUpdate - Source ->> Target: Commit - end -``` - -`Initialize` must be called before any other method and should only be invoked once. `InitializeModuleSchema` should be called at most once for every module with logical data. - -Sources will generally only call `InitializeModuleSchema` and `OnObjectUpdate` if they have native logical decoding capabilities. Usually, the indexer framework will provide this functionality based on `OnKVPair` data and `schema.HasModuleCodec` implementations. - -`StartBlock` and `OnBlockHeader` should be called only once at the beginning of a block, and `Commit` should be called only once at the end of a block. The `OnTx`, `OnEvent`, `OnKVPair` and `OnObjectUpdate` must be called after `OnBlockHeader`, may be called multiple times within a block and indexers should not assume that the order is logical unless `InitializationData.HasEventAlignedWrites` is true. diff --git a/schema/appdata/data.go b/schema/appdata/data.go deleted file mode 100644 index 7e02fbc5db..0000000000 --- a/schema/appdata/data.go +++ /dev/null @@ -1,97 +0,0 @@ -package appdata - -import ( - "encoding/json" - - "cosmossdk.io/schema" -) - -// ModuleInitializationData represents data for related to module initialization, in particular -// the module's schema. 
-type ModuleInitializationData struct { - // ModuleName is the name of the module. - ModuleName string - - // Schema is the schema of the module. - Schema schema.ModuleSchema -} - -// StartBlockData represents the data that is passed to a listener when a block is started. -type StartBlockData struct { - // Height is the height of the block. - Height uint64 - - // Bytes is the raw byte representation of the block header. It may be nil if the source does not provide it. - HeaderBytes ToBytes - - // JSON is the JSON representation of the block header. It should generally be a JSON object. - // It may be nil if the source does not provide it. - HeaderJSON ToJSON -} - -// TxData represents the raw transaction data that is passed to a listener. -type TxData struct { - // TxIndex is the index of the transaction in the block. - TxIndex int32 - - // Bytes is the raw byte representation of the transaction. - Bytes ToBytes - - // JSON is the JSON representation of the transaction. It should generally be a JSON object. - JSON ToJSON -} - -// EventData represents event data that is passed to a listener. -type EventData struct { - // TxIndex is the index of the transaction in the block to which this event is associated. - // It should be set to a negative number if the event is not associated with a transaction. - // Canonically -1 should be used to represent begin block processing and -2 should be used to - // represent end block processing. - TxIndex int32 - - // MsgIndex is the index of the message in the transaction to which this event is associated. - // If TxIndex is negative, this index could correspond to the index of the message in - // begin or end block processing if such indexes exist, or it can be set to zero. - MsgIndex uint32 - - // EventIndex is the index of the event in the message to which this event is associated. - EventIndex uint32 - - // Type is the type of the event. - Type string - - // Data is the JSON representation of the event data. It should generally be a JSON object. - Data ToJSON -} - -// ToBytes is a function that lazily returns the raw byte representation of data. -type ToBytes = func() ([]byte, error) - -// ToJSON is a function that lazily returns the JSON representation of data. -type ToJSON = func() (json.RawMessage, error) - -// KVPairData represents a batch of key-value pair data that is passed to a listener. -type KVPairData struct { - Updates []ModuleKVPairUpdate -} - -// ModuleKVPairUpdate represents a key-value pair update for a specific module. -type ModuleKVPairUpdate struct { - // ModuleName is the name of the module that the key-value pair belongs to. - ModuleName string - - // Update is the key-value pair update. - Update schema.KVPairUpdate -} - -// ObjectUpdateData represents object update data that is passed to a listener. -type ObjectUpdateData struct { - // ModuleName is the name of the module that the update corresponds to. - ModuleName string - - // Updates are the object updates. - Updates []schema.ObjectUpdate -} - -// CommitData represents commit data. It is empty for now, but fields could be added later. -type CommitData struct{} diff --git a/schema/appdata/listener.go b/schema/appdata/listener.go deleted file mode 100644 index d4786cb025..0000000000 --- a/schema/appdata/listener.go +++ /dev/null @@ -1,41 +0,0 @@ -package appdata - -// Listener is an interface that defines methods for listening to both raw and logical blockchain data. -// It is valid for any of the methods to be nil, in which case the listener will not be called for that event. 
-// Listeners should understand the guarantees that are provided by the source they are listening to and -// understand which methods will or will not be called. For instance, most blockchains will not do logical -// decoding of data out of the box, so the InitializeModuleData and OnObjectUpdate methods will not be called. -// These methods will only be called when listening logical decoding is setup. -type Listener struct { - // InitializeModuleData should be called whenever the blockchain process starts OR whenever - // logical decoding of a module is initiated. An indexer listening to this event - // should ensure that they have performed whatever initialization steps (such as database - // migrations) required to receive OnObjectUpdate events for the given module. If the - // indexer's schema is incompatible with the module's on-chain schema, the listener should return - // an error. Module names must conform to the NameFormat regular expression. - InitializeModuleData func(ModuleInitializationData) error - - // StartBlock is called at the beginning of processing a block. - StartBlock func(StartBlockData) error - - // OnTx is called when a transaction is received. - OnTx func(TxData) error - - // OnEvent is called when an event is received. - OnEvent func(EventData) error - - // OnKVPair is called when a key-value has been written to the store for a given module. - // Module names must conform to the NameFormat regular expression. - OnKVPair func(updates KVPairData) error - - // OnObjectUpdate is called whenever an object is updated in a module's state. This is only called - // when logical data is available. It should be assumed that the same data in raw form - // is also passed to OnKVPair. Module names must conform to the NameFormat regular expression. - OnObjectUpdate func(ObjectUpdateData) error - - // Commit is called when state is committed, usually at the end of a block. Any - // indexers should commit their data when this is called and return an error if - // they are unable to commit. Data sources MUST call Commit when data is committed, - // otherwise it should be assumed that indexers have not persisted their state. - Commit func(CommitData) error -} diff --git a/schema/appdata/packet.go b/schema/appdata/packet.go deleted file mode 100644 index e5fe6be966..0000000000 --- a/schema/appdata/packet.go +++ /dev/null @@ -1,61 +0,0 @@ -package appdata - -// Packet is the interface that all listener data structures implement so that this data can be "packetized" -// and processed in a stream, possibly asynchronously. -type Packet interface { - apply(*Listener) error -} - -// SendPacket sends a packet to a listener invoking the appropriate callback for this packet if one is registered. 
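A hedged usage sketch of the packet flow described above (the listener here is hypothetical): `SendPacket` routes each packet to its matching callback and silently skips packets whose callback is nil, as the `apply` methods below make explicit.

```go
package example

import (
	"fmt"

	"cosmossdk.io/schema/appdata"
)

func driveOneBlock() error {
	// A listener that only records commits; all other callbacks are nil.
	listener := appdata.Listener{
		Commit: func(appdata.CommitData) error {
			fmt.Println("block committed")
			return nil
		},
	}
	for _, p := range []appdata.Packet{
		appdata.StartBlockData{Height: 1}, // nil StartBlock callback: no-op
		appdata.TxData{TxIndex: 0},        // nil OnTx callback: no-op
		appdata.CommitData{},              // dispatches to Commit
	} {
		if err := listener.SendPacket(p); err != nil {
			return err
		}
	}
	return nil
}
```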
-func (l Listener) SendPacket(p Packet) error { - return p.apply(&l) -} - -func (m ModuleInitializationData) apply(l *Listener) error { - if l.InitializeModuleData == nil { - return nil - } - return l.InitializeModuleData(m) -} - -func (b StartBlockData) apply(l *Listener) error { - if l.StartBlock == nil { - return nil - } - return l.StartBlock(b) -} - -func (t TxData) apply(l *Listener) error { - if l.OnTx == nil { - return nil - } - return l.OnTx(t) -} - -func (e EventData) apply(l *Listener) error { - if l.OnEvent == nil { - return nil - } - return l.OnEvent(e) -} - -func (k KVPairData) apply(l *Listener) error { - if l.OnKVPair == nil { - return nil - } - return l.OnKVPair(k) -} - -func (o ObjectUpdateData) apply(l *Listener) error { - if l.OnObjectUpdate == nil { - return nil - } - return l.OnObjectUpdate(o) -} - -func (c CommitData) apply(l *Listener) error { - if l.Commit == nil { - return nil - } - return l.Commit(c) -} diff --git a/schema/decoder.go b/schema/decoder.go deleted file mode 100644 index 86aedec9f9..0000000000 --- a/schema/decoder.go +++ /dev/null @@ -1,40 +0,0 @@ -package schema - -// HasModuleCodec is an interface that modules can implement to provide a ModuleCodec. -// Usually these modules would also implement appmodule.AppModule, but that is not included -// to keep this package free of any dependencies. -type HasModuleCodec interface { - // ModuleCodec returns a ModuleCodec for the module. - ModuleCodec() (ModuleCodec, error) -} - -// ModuleCodec is a struct that contains the schema and a KVDecoder for a module. -type ModuleCodec struct { - // Schema is the schema for the module. It is required. - Schema ModuleSchema - - // KVDecoder is a function that decodes a key-value pair into an ObjectUpdate. - // If it is nil, the module doesn't support state decoding directly. - KVDecoder KVDecoder -} - -// KVDecoder is a function that decodes a key-value pair into one or more ObjectUpdate's. -// If the KV-pair doesn't represent object updates, the function should return nil as the first -// and no error. The error result should only be non-nil when the decoder expected -// to parse a valid update and was unable to. In the case of an error, the decoder may return -// a non-nil value for the first return value, which can indicate which parts of the update -// were decodable to aid debugging. -type KVDecoder = func(KVPairUpdate) ([]ObjectUpdate, error) - -// KVPairUpdate represents a key-value pair set or delete. -type KVPairUpdate struct { - // Key is the key of the key-value pair. - Key []byte - - // Value is the value of the key-value pair. It should be ignored when Delete is true. - Value []byte - - // Delete is a flag that indicates that the key-value pair was deleted. If it is false, - // then it is assumed that this has been a set operation. 
- Delete bool -} diff --git a/schema/decoding/decoding_test.go b/schema/decoding/decoding_test.go deleted file mode 100644 index d4308390d2..0000000000 --- a/schema/decoding/decoding_test.go +++ /dev/null @@ -1,458 +0,0 @@ -package decoding - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "testing" - - "cosmossdk.io/schema" - "cosmossdk.io/schema/appdata" -) - -func TestMiddleware(t *testing.T) { - tl := newTestFixture(t) - listener, err := Middleware(tl.Listener, tl.resolver, MiddlewareOptions{}) - if err != nil { - t.Fatal("unexpected error", err) - } - tl.setListener(listener) - - tl.bankMod.Mint("bob", "foo", 100) - err = tl.bankMod.Send("bob", "alice", "foo", 50) - if err != nil { - t.Fatal("unexpected error", err) - } - - tl.oneMod.SetValue("abc") - - expectedBank := []schema.ObjectUpdate{ - { - TypeName: "supply", - Key: []interface{}{"foo"}, - Value: uint64(100), - }, - { - TypeName: "balances", - Key: []interface{}{"bob", "foo"}, - Value: uint64(100), - }, - { - TypeName: "balances", - Key: []interface{}{"bob", "foo"}, - Value: uint64(50), - }, - { - TypeName: "balances", - Key: []interface{}{"alice", "foo"}, - Value: uint64(50), - }, - } - - if !reflect.DeepEqual(tl.bankUpdates, expectedBank) { - t.Fatalf("expected %v, got %v", expectedBank, tl.bankUpdates) - } - - expectedOne := []schema.ObjectUpdate{ - {TypeName: "item", Value: "abc"}, - } - - if !reflect.DeepEqual(tl.oneValueUpdates, expectedOne) { - t.Fatalf("expected %v, got %v", expectedOne, tl.oneValueUpdates) - } -} - -func TestMiddleware_filtered(t *testing.T) { - tl := newTestFixture(t) - listener, err := Middleware(tl.Listener, tl.resolver, MiddlewareOptions{ - ModuleFilter: func(moduleName string) bool { - return moduleName == "one" //nolint:goconst // adding constants for this would impede readability - }, - }) - if err != nil { - t.Fatal("unexpected error", err) - } - tl.setListener(listener) - - tl.bankMod.Mint("bob", "foo", 100) - tl.oneMod.SetValue("abc") - - if len(tl.bankUpdates) != 0 { - t.Fatalf("expected no bank updates") - } - - expectedOne := []schema.ObjectUpdate{ - {TypeName: "item", Value: "abc"}, - } - - if !reflect.DeepEqual(tl.oneValueUpdates, expectedOne) { - t.Fatalf("expected %v, got %v", expectedOne, tl.oneValueUpdates) - } -} - -func TestSync(t *testing.T) { - tl := newTestFixture(t) - tl.bankMod.Mint("bob", "foo", 100) - err := tl.bankMod.Send("bob", "alice", "foo", 50) - if err != nil { - t.Fatal("unexpected error", err) - } - - tl.oneMod.SetValue("def") - - err = Sync(tl.Listener, tl.multiStore, tl.resolver, SyncOptions{}) - if err != nil { - t.Fatal("unexpected error", err) - } - - expected := []schema.ObjectUpdate{ - { - TypeName: "balances", - Key: []interface{}{"alice", "foo"}, - Value: uint64(50), - }, - { - TypeName: "balances", - Key: []interface{}{"bob", "foo"}, - Value: uint64(50), - }, - { - TypeName: "supply", - Key: []interface{}{"foo"}, - Value: uint64(100), - }, - } - - if !reflect.DeepEqual(tl.bankUpdates, expected) { - t.Fatalf("expected %v, got %v", expected, tl.bankUpdates) - } - - expectedOne := []schema.ObjectUpdate{ - {TypeName: "item", Value: "def"}, - } - - if !reflect.DeepEqual(tl.oneValueUpdates, expectedOne) { - t.Fatalf("expected %v, got %v", expectedOne, tl.oneValueUpdates) - } -} - -func TestSync_filtered(t *testing.T) { - tl := newTestFixture(t) - tl.bankMod.Mint("bob", "foo", 100) - tl.oneMod.SetValue("def") - - err := Sync(tl.Listener, tl.multiStore, tl.resolver, SyncOptions{ - ModuleFilter: func(moduleName string) bool { - return moduleName 
== "one" - }, - }) - if err != nil { - t.Fatal("unexpected error", err) - } - - if len(tl.bankUpdates) != 0 { - t.Fatalf("expected no bank updates") - } - - expectedOne := []schema.ObjectUpdate{ - {TypeName: "item", Value: "def"}, - } - - if !reflect.DeepEqual(tl.oneValueUpdates, expectedOne) { - t.Fatalf("expected %v, got %v", expectedOne, tl.oneValueUpdates) - } -} - -type testFixture struct { - appdata.Listener - bankUpdates []schema.ObjectUpdate - oneValueUpdates []schema.ObjectUpdate - resolver DecoderResolver - multiStore *testMultiStore - bankMod *exampleBankModule - oneMod *oneValueModule -} - -func newTestFixture(t *testing.T) *testFixture { - t.Helper() - res := &testFixture{} - res.Listener = appdata.Listener{ - InitializeModuleData: func(data appdata.ModuleInitializationData) error { - var expected schema.ModuleSchema - switch data.ModuleName { - case "bank": - expected = exampleBankSchema - case "one": - - expected = oneValueModSchema - default: - t.Fatalf("unexpected module %s", data.ModuleName) - } - - if !reflect.DeepEqual(data.Schema, expected) { - t.Errorf("expected %v, got %v", expected, data.Schema) - } - return nil - }, - OnObjectUpdate: func(data appdata.ObjectUpdateData) error { - switch data.ModuleName { - case "bank": - res.bankUpdates = append(res.bankUpdates, data.Updates...) - case "one": - res.oneValueUpdates = append(res.oneValueUpdates, data.Updates...) - default: - t.Errorf("unexpected module %s", data.ModuleName) - } - return nil - }, - } - res.multiStore = newTestMultiStore() - res.bankMod = &exampleBankModule{ - store: res.multiStore.newTestStore(t, "bank"), - } - res.oneMod = &oneValueModule{ - store: res.multiStore.newTestStore(t, "one"), - } - modSet := map[string]interface{}{ - "bank": res.bankMod, - "one": res.oneMod, - } - res.resolver = ModuleSetDecoderResolver(modSet) - return res -} - -func (f *testFixture) setListener(listener appdata.Listener) { - f.bankMod.store.listener = listener - f.oneMod.store.listener = listener -} - -type testMultiStore struct { - stores map[string]*testStore -} - -type testStore struct { - t *testing.T - modName string - store map[string][]byte - listener appdata.Listener -} - -func newTestMultiStore() *testMultiStore { - return &testMultiStore{ - stores: map[string]*testStore{}, - } -} - -var _ SyncSource = &testMultiStore{} - -func (ms *testMultiStore) IterateAllKVPairs(moduleName string, fn func(key, value []byte) error) error { - s, ok := ms.stores[moduleName] - if !ok { - return fmt.Errorf("don't have state for module %s", moduleName) - } - - var keys []string - for key := range s.store { - keys = append(keys, key) - } - sort.Strings(keys) - for _, key := range keys { - err := fn([]byte(key), s.store[key]) - if err != nil { - return err - } - } - return nil -} - -func (ms *testMultiStore) newTestStore(t *testing.T, modName string) *testStore { - t.Helper() - s := &testStore{ - t: t, - modName: modName, - store: map[string][]byte{}, - } - ms.stores[modName] = s - return s -} - -func (t testStore) Get(key []byte) []byte { - return t.store[string(key)] -} - -func (t testStore) GetUInt64(key []byte) uint64 { - bz := t.store[string(key)] - if len(bz) == 0 { - return 0 - } - x, err := strconv.ParseUint(string(bz), 10, 64) - if err != nil { - t.t.Fatalf("unexpected error: %v", err) - } - return x -} - -func (t testStore) Set(key, value []byte) { - if t.listener.OnKVPair != nil { - err := t.listener.OnKVPair(appdata.KVPairData{Updates: []appdata.ModuleKVPairUpdate{ - { - ModuleName: t.modName, - Update: 
schema.KVPairUpdate{ - Key: key, - Value: value, - }, - }, - }}) - if err != nil { - t.t.Fatalf("unexpected error: %v", err) - } - } - t.store[string(key)] = value -} - -func (t testStore) SetUInt64(key []byte, value uint64) { - t.Set(key, []byte(strconv.FormatUint(value, 10))) -} - -type exampleBankModule struct { - store *testStore -} - -func (e exampleBankModule) Mint(acct, denom string, amount uint64) { - key := supplyKey(denom) - e.store.SetUInt64(key, e.store.GetUInt64(key)+amount) - e.addBalance(acct, denom, amount) -} - -func (e exampleBankModule) Send(from, to, denom string, amount uint64) error { - err := e.subBalance(from, denom, amount) - if err != nil { - return nil - } - e.addBalance(to, denom, amount) - return nil -} - -func (e exampleBankModule) GetBalance(acct, denom string) uint64 { - return e.store.GetUInt64(balanceKey(acct, denom)) -} - -func (e exampleBankModule) GetSupply(denom string) uint64 { - return e.store.GetUInt64(supplyKey(denom)) -} - -func balanceKey(acct, denom string) []byte { - return []byte(fmt.Sprintf("balance/%s/%s", acct, denom)) -} - -func supplyKey(denom string) []byte { - return []byte(fmt.Sprintf("supply/%s", denom)) -} - -func (e exampleBankModule) addBalance(acct, denom string, amount uint64) { - key := balanceKey(acct, denom) - e.store.SetUInt64(key, e.store.GetUInt64(key)+amount) -} - -func (e exampleBankModule) subBalance(acct, denom string, amount uint64) error { - key := balanceKey(acct, denom) - cur := e.store.GetUInt64(key) - if cur < amount { - return fmt.Errorf("insufficient balance") - } - e.store.SetUInt64(key, cur-amount) - return nil -} - -var exampleBankSchema = schema.ModuleSchema{ - ObjectTypes: []schema.ObjectType{ - { - Name: "balances", - KeyFields: []schema.Field{ - { - Name: "account", - Kind: schema.StringKind, - }, - { - Name: "denom", - Kind: schema.StringKind, - }, - }, - ValueFields: []schema.Field{ - { - Name: "amount", - Kind: schema.Uint64Kind, - }, - }, - }, - }, -} - -func (e exampleBankModule) ModuleCodec() (schema.ModuleCodec, error) { - return schema.ModuleCodec{ - Schema: exampleBankSchema, - KVDecoder: func(update schema.KVPairUpdate) ([]schema.ObjectUpdate, error) { - key := string(update.Key) - value, err := strconv.ParseUint(string(update.Value), 10, 64) - if err != nil { - return nil, err - } - if strings.HasPrefix(key, "balance/") { - parts := strings.Split(key, "/") - return []schema.ObjectUpdate{{ - TypeName: "balances", - Key: []interface{}{parts[1], parts[2]}, - Value: value, - }}, nil - } else if strings.HasPrefix(key, "supply/") { - parts := strings.Split(key, "/") - return []schema.ObjectUpdate{{ - TypeName: "supply", - Key: []interface{}{parts[1]}, - Value: value, - }}, nil - } else { - return nil, fmt.Errorf("unexpected key: %s", key) - } - }, - }, nil -} - -var _ schema.HasModuleCodec = exampleBankModule{} - -type oneValueModule struct { - store *testStore -} - -var oneValueModSchema = schema.ModuleSchema{ - ObjectTypes: []schema.ObjectType{ - { - Name: "item", - ValueFields: []schema.Field{ - {Name: "value", Kind: schema.StringKind}, - }, - }, - }, -} - -func (i oneValueModule) ModuleCodec() (schema.ModuleCodec, error) { - return schema.ModuleCodec{ - Schema: oneValueModSchema, - KVDecoder: func(update schema.KVPairUpdate) ([]schema.ObjectUpdate, error) { - if string(update.Key) != "key" { - return nil, fmt.Errorf("unexpected key: %v", update.Key) - } - return []schema.ObjectUpdate{ - {TypeName: "item", Value: string(update.Value)}, - }, nil - }, - }, nil -} - -func (i oneValueModule) 
SetValue(x string) { - i.store.Set([]byte("key"), []byte(x)) -} - -var _ schema.HasModuleCodec = oneValueModule{} diff --git a/schema/decoding/middleware.go b/schema/decoding/middleware.go deleted file mode 100644 index 57c0783c62..0000000000 --- a/schema/decoding/middleware.go +++ /dev/null @@ -1,106 +0,0 @@ -package decoding - -import ( - "cosmossdk.io/schema" - "cosmossdk.io/schema/appdata" -) - -type MiddlewareOptions struct { - ModuleFilter func(moduleName string) bool -} - -// Middleware decodes raw data passed to the listener as kv-updates into decoded object updates. Module initialization -// is done lazily as modules are encountered in the kv-update stream. -func Middleware(target appdata.Listener, resolver DecoderResolver, opts MiddlewareOptions) (appdata.Listener, error) { - initializeModuleData := target.InitializeModuleData - onObjectUpdate := target.OnObjectUpdate - - // no-op if not listening to decoded data - if initializeModuleData == nil && onObjectUpdate == nil { - return target, nil - } - - onKVPair := target.OnKVPair - - moduleCodecs := map[string]*schema.ModuleCodec{} - - target.OnKVPair = func(data appdata.KVPairData) error { - // first forward kv pair updates - if onKVPair != nil { - err := onKVPair(data) - if err != nil { - return err - } - } - - for _, kvUpdate := range data.Updates { - // look for an existing codec - pcdc, ok := moduleCodecs[kvUpdate.ModuleName] - if !ok { - if opts.ModuleFilter != nil && !opts.ModuleFilter(kvUpdate.ModuleName) { - // we don't care about this module so store nil and continue - moduleCodecs[kvUpdate.ModuleName] = nil - continue - } - - // look for a new codec - cdc, found, err := resolver.LookupDecoder(kvUpdate.ModuleName) - if err != nil { - return err - } - - if !found { - // store nil to indicate we've seen this module and don't have a codec - // and keep processing the kv updates - moduleCodecs[kvUpdate.ModuleName] = nil - continue - } - - pcdc = &cdc - moduleCodecs[kvUpdate.ModuleName] = pcdc - - if initializeModuleData != nil { - err = initializeModuleData(appdata.ModuleInitializationData{ - ModuleName: kvUpdate.ModuleName, - Schema: cdc.Schema, - }) - if err != nil { - return err - } - } - } - - if pcdc == nil { - // we've already seen this module and can't decode - continue - } - - if onObjectUpdate == nil || pcdc.KVDecoder == nil { - // not listening to updates or can't decode so continue - continue - } - - updates, err := pcdc.KVDecoder(kvUpdate.Update) - if err != nil { - return err - } - - if len(updates) == 0 { - // no updates - continue - } - - err = target.OnObjectUpdate(appdata.ObjectUpdateData{ - ModuleName: kvUpdate.ModuleName, - Updates: updates, - }) - if err != nil { - return err - } - } - - return nil - } - - return target, nil -} diff --git a/schema/decoding/resolver.go b/schema/decoding/resolver.go deleted file mode 100644 index db0ec0bb17..0000000000 --- a/schema/decoding/resolver.go +++ /dev/null @@ -1,66 +0,0 @@ -package decoding - -import ( - "sort" - - "cosmossdk.io/schema" -) - -// DecoderResolver is an interface that allows indexers to discover and use module decoders. -type DecoderResolver interface { - // IterateAll iterates over all available module decoders. - IterateAll(func(moduleName string, cdc schema.ModuleCodec) error) error - - // LookupDecoder looks up a specific module decoder. 
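A short sketch of wiring `middleware.go` and `resolver.go` together, in the spirit of the deleted tests above (`target` is an assumed downstream `appdata.Listener` and `moduleSet` an assumed map of module implementations).

```go
package example

import (
	"cosmossdk.io/schema/appdata"
	"cosmossdk.io/schema/decoding"
)

// wireIndexer discovers per-module codecs from the module set, then wraps
// the target listener so raw KV writes are lazily decoded into object updates.
func wireIndexer(target appdata.Listener, moduleSet map[string]interface{}) (appdata.Listener, error) {
	resolver := decoding.ModuleSetDecoderResolver(moduleSet)
	return decoding.Middleware(target, resolver, decoding.MiddlewareOptions{})
}
```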
-    LookupDecoder(moduleName string) (decoder schema.ModuleCodec, found bool, err error)
-}
-
-// ModuleSetDecoderResolver returns a DecoderResolver that will discover modules implementing
-// HasModuleCodec in the provided module set.
-func ModuleSetDecoderResolver(moduleSet map[string]interface{}) DecoderResolver {
-    return &moduleSetDecoderResolver{
-        moduleSet: moduleSet,
-    }
-}
-
-type moduleSetDecoderResolver struct {
-    moduleSet map[string]interface{}
-}
-
-func (a moduleSetDecoderResolver) IterateAll(f func(string, schema.ModuleCodec) error) error {
-    keys := make([]string, 0, len(a.moduleSet))
-    for k := range a.moduleSet {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-    for _, k := range keys {
-        module := a.moduleSet[k]
-        dm, ok := module.(schema.HasModuleCodec)
-        if ok {
-            decoder, err := dm.ModuleCodec()
-            if err != nil {
-                return err
-            }
-            err = f(k, decoder)
-            if err != nil {
-                return err
-            }
-        }
-    }
-    return nil
-}
-
-func (a moduleSetDecoderResolver) LookupDecoder(moduleName string) (schema.ModuleCodec, bool, error) {
-    mod, ok := a.moduleSet[moduleName]
-    if !ok {
-        return schema.ModuleCodec{}, false, nil
-    }
-
-    dm, ok := mod.(schema.HasModuleCodec)
-    if !ok {
-        return schema.ModuleCodec{}, false, nil
-    }
-
-    decoder, err := dm.ModuleCodec()
-    return decoder, true, err
-}
diff --git a/schema/decoding/resolver_test.go b/schema/decoding/resolver_test.go
deleted file mode 100644
index f5caf287ca..0000000000
--- a/schema/decoding/resolver_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package decoding
-
-import (
-    "errors"
-    "testing"
-
-    "cosmossdk.io/schema"
-)
-
-type modA struct{}
-
-func (m modA) ModuleCodec() (schema.ModuleCodec, error) {
-    return schema.ModuleCodec{
-        Schema: schema.ModuleSchema{ObjectTypes: []schema.ObjectType{{Name: "A"}}},
-    }, nil
-}
-
-type modB struct{}
-
-func (m modB) ModuleCodec() (schema.ModuleCodec, error) {
-    return schema.ModuleCodec{
-        Schema: schema.ModuleSchema{ObjectTypes: []schema.ObjectType{{Name: "B"}}},
-    }, nil
-}
-
-type modC struct{}
-
-var moduleSet = map[string]interface{}{
-    "modA": modA{},
-    "modB": modB{},
-    "modC": modC{},
-}
-
-var testResolver = ModuleSetDecoderResolver(moduleSet)
-
-func TestModuleSetDecoderResolver_IterateAll(t *testing.T) {
-    objectTypes := map[string]bool{}
-    err := testResolver.IterateAll(func(moduleName string, cdc schema.ModuleCodec) error {
-        objectTypes[cdc.Schema.ObjectTypes[0].Name] = true
-        return nil
-    })
-    if err != nil {
t.Fatalf("unexpected error: %v", err) - } - - if found { - t.Fatalf("expected not to find decoder") - } - - decoder, found, err = testResolver.LookupDecoder("modD") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if found { - t.Fatalf("expected not to find decoder") - } -} - -type modD struct{} - -func (m modD) ModuleCodec() (schema.ModuleCodec, error) { - return schema.ModuleCodec{}, errors.New("an error") -} - -func TestModuleSetDecoderResolver_IterateAll_Error(t *testing.T) { - resolver := ModuleSetDecoderResolver(map[string]interface{}{ - "modD": modD{}, - }) - err := resolver.IterateAll(func(moduleName string, cdc schema.ModuleCodec) error { - if moduleName == "modD" { - t.Fatalf("expected error") - } - return nil - }) - if err == nil { - t.Fatalf("expected error") - } -} diff --git a/schema/decoding/sync.go b/schema/decoding/sync.go deleted file mode 100644 index d8aee9884c..0000000000 --- a/schema/decoding/sync.go +++ /dev/null @@ -1,63 +0,0 @@ -package decoding - -import ( - "cosmossdk.io/schema" - "cosmossdk.io/schema/appdata" -) - -// SyncSource is an interface that allows indexers to start indexing modules with pre-existing state. -// It should generally be a wrapper around the key-value store. -type SyncSource interface { - // IterateAllKVPairs iterates over all key-value pairs for a given module. - IterateAllKVPairs(moduleName string, fn func(key, value []byte) error) error -} - -// SyncOptions are the options for Sync. -type SyncOptions struct { - ModuleFilter func(moduleName string) bool -} - -// Sync synchronizes existing state from the sync source to the listener using the resolver to decode data. -func Sync(listener appdata.Listener, source SyncSource, resolver DecoderResolver, opts SyncOptions) error { - initializeModuleData := listener.InitializeModuleData - onObjectUpdate := listener.OnObjectUpdate - - // no-op if not listening to decoded data - if initializeModuleData == nil && onObjectUpdate == nil { - return nil - } - - return resolver.IterateAll(func(moduleName string, cdc schema.ModuleCodec) error { - if opts.ModuleFilter != nil && !opts.ModuleFilter(moduleName) { - // ignore this module - return nil - } - - if initializeModuleData != nil { - err := initializeModuleData(appdata.ModuleInitializationData{ - ModuleName: moduleName, - Schema: cdc.Schema, - }) - if err != nil { - return err - } - } - - if onObjectUpdate == nil || cdc.KVDecoder == nil { - return nil - } - - return source.IterateAllKVPairs(moduleName, func(key, value []byte) error { - updates, err := cdc.KVDecoder(schema.KVPairUpdate{Key: key, Value: value}) - if err != nil { - return err - } - - if len(updates) == 0 { - return nil - } - - return onObjectUpdate(appdata.ObjectUpdateData{ModuleName: moduleName, Updates: updates}) - }) - }) -} diff --git a/schema/enum.go b/schema/enum.go deleted file mode 100644 index 927cc827cb..0000000000 --- a/schema/enum.go +++ /dev/null @@ -1,80 +0,0 @@ -package schema - -import ( - "errors" - "fmt" -) - -// EnumDefinition represents the definition of an enum type. -type EnumDefinition struct { - // Name is the name of the enum type. It must conform to the NameFormat regular expression. - // Its name must be unique between all enum types and object types in the module. - // The same enum, however, can be used in multiple object types and fields as long as the - // definition is identical each time - Name string - - // Values is a list of distinct, non-empty values that are part of the enum type. 
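
The middleware.go, resolver.go, and sync.go files above make up the whole decoding entry point: Middleware handles the live kv-update stream and Sync handles catch-up from existing state. A minimal sketch of how they compose, using only the APIs shown above; the `memSource`/`kvPair` types and the print-only listener are hypothetical stand-ins:

```go
package main

import (
    "fmt"

    "cosmossdk.io/schema/appdata"
    "cosmossdk.io/schema/decoding"
)

type kvPair struct{ key, value []byte }

// memSource is a hypothetical SyncSource backed by a map, for illustration only.
type memSource struct{ kvs map[string][]kvPair }

func (m memSource) IterateAllKVPairs(moduleName string, fn func(key, value []byte) error) error {
    for _, kv := range m.kvs[moduleName] {
        if err := fn(kv.key, kv.value); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    // modules implementing schema.HasModuleCodec would go in this set
    resolver := decoding.ModuleSetDecoderResolver(map[string]interface{}{})

    // a listener that only cares about decoded object updates
    listener := appdata.Listener{
        OnObjectUpdate: func(data appdata.ObjectUpdateData) error {
            fmt.Println("module", data.ModuleName, "updates", data.Updates)
            return nil
        },
    }

    // catch-up: decode pre-existing state into the listener
    src := memSource{kvs: map[string][]kvPair{}}
    if err := decoding.Sync(listener, src, resolver, decoding.SyncOptions{}); err != nil {
        panic(err)
    }

    // live path: wrap the listener so raw kv-updates are decoded lazily
    wrapped, err := decoding.Middleware(listener, resolver, decoding.MiddlewareOptions{})
    if err != nil {
        panic(err)
    }
    _ = wrapped // the app feeds OnKVPair data into wrapped
}
```
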
- // Each value must conform to the NameFormat regular expression. - Values []string -} - -// Validate validates the enum definition. -func (e EnumDefinition) Validate() error { - if !ValidateName(e.Name) { - return fmt.Errorf("invalid enum definition name %q", e.Name) - } - - if len(e.Values) == 0 { - return errors.New("enum definition values cannot be empty") - } - seen := make(map[string]bool, len(e.Values)) - for i, v := range e.Values { - if !ValidateName(v) { - return fmt.Errorf("invalid enum definition value %q at index %d for enum %s", v, i, e.Name) - } - - if seen[v] { - return fmt.Errorf("duplicate enum definition value %q for enum %s", v, e.Name) - } - seen[v] = true - } - return nil -} - -// ValidateValue validates that the value is a valid enum value. -func (e EnumDefinition) ValidateValue(value string) error { - for _, v := range e.Values { - if v == value { - return nil - } - } - return fmt.Errorf("value %q is not a valid enum value for %s", value, e.Name) -} - -// checkEnumCompatibility checks that the enum values are consistent across object types and fields. -func checkEnumCompatibility(enumValueMap map[string]map[string]bool, field Field) error { - if field.Kind != EnumKind { - return nil - } - - enum := field.EnumDefinition - - if existing, ok := enumValueMap[enum.Name]; ok { - if len(existing) != len(enum.Values) { - return fmt.Errorf("enum %q has different number of values in different object types", enum.Name) - } - - for _, value := range enum.Values { - if !existing[value] { - return fmt.Errorf("enum %q has different values in different object types", enum.Name) - } - } - } else { - valueMap := map[string]bool{} - for _, value := range enum.Values { - valueMap[value] = true - } - enumValueMap[enum.Name] = valueMap - } - return nil -} diff --git a/schema/enum_test.go b/schema/enum_test.go deleted file mode 100644 index 435449d0c5..0000000000 --- a/schema/enum_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package schema - -import ( - "strings" - "testing" -) - -func TestEnumDefinition_Validate(t *testing.T) { - tests := []struct { - name string - enum EnumDefinition - errContains string - }{ - { - name: "valid enum", - enum: EnumDefinition{ - Name: "test", - Values: []string{"a", "b", "c"}, - }, - errContains: "", - }, - { - name: "empty name", - enum: EnumDefinition{ - Name: "", - Values: []string{"a", "b", "c"}, - }, - errContains: "invalid enum definition name", - }, - { - name: "empty values", - enum: EnumDefinition{ - Name: "test", - Values: []string{}, - }, - errContains: "enum definition values cannot be empty", - }, - { - name: "empty value", - enum: EnumDefinition{ - Name: "test", - Values: []string{"a", "", "c"}, - }, - errContains: "invalid enum definition value", - }, - { - name: "duplicate value", - enum: EnumDefinition{ - Name: "test", - Values: []string{"a", "b", "a"}, - }, - errContains: "duplicate enum definition value \"a\" for enum test", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.enum.Validate() - if tt.errContains == "" { - if err != nil { - t.Errorf("expected valid enum definition to pass validation, got: %v", err) - } - } else { - if err == nil { - t.Errorf("expected invalid enum definition to fail validation, got nil error") - } else if !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("expected error to contain %s, got: %v", tt.errContains, err) - } - } - }) - } -} - -func TestEnumDefinition_ValidateValue(t *testing.T) { - enum := EnumDefinition{ - Name: "test", - Values: []string{"a", "b", 
"c"}, - } - - tests := []struct { - value string - errContains string - }{ - {"a", ""}, - {"b", ""}, - {"c", ""}, - {"d", "value \"d\" is not a valid enum value for test"}, - } - - for _, tt := range tests { - t.Run(tt.value, func(t *testing.T) { - err := enum.ValidateValue(tt.value) - if tt.errContains == "" { - if err != nil { - t.Errorf("expected valid enum value to pass validation, got: %v", err) - } - } else { - if err == nil { - t.Errorf("expected invalid enum value to fail validation, got nil error") - } else if !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("expected error to contain %s, got: %v", tt.errContains, err) - } - } - }) - } -} diff --git a/schema/field.go b/schema/field.go deleted file mode 100644 index a11cc75668..0000000000 --- a/schema/field.go +++ /dev/null @@ -1,67 +0,0 @@ -package schema - -import "fmt" - -// Field represents a field in an object type. -type Field struct { - // Name is the name of the field. It must conform to the NameFormat regular expression. - Name string - - // Kind is the basic type of the field. - Kind Kind - - // Nullable indicates whether null values are accepted for the field. Key fields CANNOT be nullable. - Nullable bool - - // EnumDefinition is the definition of the enum type and is only valid when Kind is EnumKind. - // The same enum types can be reused in the same module schema, but they always must contain - // the same values for the same enum name. This possibly introduces some duplication of - // definitions but makes it easier to reason about correctness and validation in isolation. - EnumDefinition EnumDefinition -} - -// Validate validates the field. -func (c Field) Validate() error { - // valid name - if !ValidateName(c.Name) { - return fmt.Errorf("invalid field name %q", c.Name) - } - - // valid kind - if err := c.Kind.Validate(); err != nil { - return fmt.Errorf("invalid field kind for %q: %v", c.Name, err) //nolint:errorlint // false positive due to using go1.12 - } - - // enum definition only valid with EnumKind - if c.Kind == EnumKind { - if err := c.EnumDefinition.Validate(); err != nil { - return fmt.Errorf("invalid enum definition for field %q: %v", c.Name, err) //nolint:errorlint // false positive due to using go1.12 - } - } else if c.Kind != EnumKind && (c.EnumDefinition.Name != "" || c.EnumDefinition.Values != nil) { - return fmt.Errorf("enum definition is only valid for field %q with type EnumKind", c.Name) - } - - return nil -} - -// ValidateValue validates that the value conforms to the field's kind and nullability. -// Unlike Kind.ValidateValue, it also checks that the value conforms to the EnumDefinition -// if the field is an EnumKind. 
-func (c Field) ValidateValue(value interface{}) error { - if value == nil { - if !c.Nullable { - return fmt.Errorf("field %q cannot be null", c.Name) - } - return nil - } - err := c.Kind.ValidateValueType(value) - if err != nil { - return fmt.Errorf("invalid value for field %q: %v", c.Name, err) //nolint:errorlint // false positive due to using go1.12 - } - - if c.Kind == EnumKind { - return c.EnumDefinition.ValidateValue(value.(string)) - } - - return nil -} diff --git a/schema/field_test.go b/schema/field_test.go deleted file mode 100644 index ea839ece08..0000000000 --- a/schema/field_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package schema - -import ( - "strings" - "testing" -) - -func TestField_Validate(t *testing.T) { - tests := []struct { - name string - field Field - errContains string - }{ - { - name: "valid field", - field: Field{ - Name: "field1", - Kind: StringKind, - }, - errContains: "", - }, - { - name: "empty name", - field: Field{ - Name: "", - Kind: StringKind, - }, - errContains: "invalid field name", - }, - { - name: "invalid kind", - field: Field{ - Name: "field1", - Kind: InvalidKind, - }, - errContains: "invalid field kind", - }, - { - name: "invalid enum definition", - field: Field{ - Name: "field1", - Kind: EnumKind, - }, - errContains: "invalid enum definition", - }, - { - name: "enum definition with non-EnumKind", - field: Field{ - Name: "field1", - Kind: StringKind, - EnumDefinition: EnumDefinition{Name: "enum"}, - }, - errContains: "enum definition is only valid for field \"field1\" with type EnumKind", - }, - { - name: "valid enum", - field: Field{ - Name: "field1", - Kind: EnumKind, - EnumDefinition: EnumDefinition{Name: "enum", Values: []string{"a", "b"}}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.field.Validate() - if tt.errContains == "" { - if err != nil { - t.Errorf("expected no error, got: %v", err) - } - } else { - if err == nil { - t.Errorf("expected error, got nil") - } else if !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("expected error contains: %s, got: %v", tt.errContains, err) - } - } - }) - } -} - -func TestField_ValidateValue(t *testing.T) { - tests := []struct { - name string - field Field - value interface{} - errContains string - }{ - { - name: "valid field", - field: Field{ - Name: "field1", - Kind: StringKind, - }, - value: "value", - errContains: "", - }, - { - name: "null non-nullable field", - field: Field{ - Name: "field1", - Kind: StringKind, - Nullable: false, - }, - value: nil, - errContains: "cannot be null", - }, - { - name: "null nullable field", - field: Field{ - Name: "field1", - Kind: StringKind, - Nullable: true, - }, - value: nil, - errContains: "", - }, - { - name: "invalid value", - field: Field{ - Name: "field1", - Kind: StringKind, - }, - value: 1, - errContains: "invalid value for field \"field1\"", - }, - { - name: "valid enum", - field: Field{ - Name: "field1", - Kind: EnumKind, - EnumDefinition: EnumDefinition{Name: "enum", Values: []string{"a", "b"}}, - }, - value: "a", - errContains: "", - }, - { - name: "invalid enum", - field: Field{ - Name: "field1", - Kind: EnumKind, - EnumDefinition: EnumDefinition{Name: "enum", Values: []string{"a", "b"}}, - }, - value: "c", - errContains: "not a valid enum value", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.field.ValidateValue(tt.value) - if tt.errContains == "" { - if err != nil { - t.Errorf("expected no error, got: %v", err) - } - } else { - if err == nil { - 
t.Errorf("expected error, got nil") - } else if !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("expected error contains: %s, got: %v", tt.errContains, err) - } - } - }) - } -} diff --git a/schema/fields.go b/schema/fields.go deleted file mode 100644 index be08ca66ef..0000000000 --- a/schema/fields.go +++ /dev/null @@ -1,71 +0,0 @@ -package schema - -import "fmt" - -// ValidateObjectKey validates that the value conforms to the set of fields as a Key in an ObjectUpdate. -// See ObjectUpdate.Key for documentation on the requirements of such keys. -func ValidateObjectKey(keyFields []Field, value interface{}) error { - return validateFieldsValue(keyFields, value) -} - -// ValidateObjectValue validates that the value conforms to the set of fields as a Value in an ObjectUpdate. -// See ObjectUpdate.Value for documentation on the requirements of such values. -func ValidateObjectValue(valueFields []Field, value interface{}) error { - valueUpdates, ok := value.(ValueUpdates) - if !ok { - return validateFieldsValue(valueFields, value) - } - - values := map[string]interface{}{} - err := valueUpdates.Iterate(func(fieldname string, value interface{}) bool { - values[fieldname] = value - return true - }) - if err != nil { - return err - } - - for _, field := range valueFields { - v, ok := values[field.Name] - if !ok { - continue - } - - if err := field.ValidateValue(v); err != nil { - return err - } - - delete(values, field.Name) - } - - if len(values) > 0 { - return fmt.Errorf("unexpected values in ValueUpdates: %v", values) - } - - return nil -} - -func validateFieldsValue(fields []Field, value interface{}) error { - if len(fields) == 0 { - return nil - } - - if len(fields) == 1 { - return fields[0].ValidateValue(value) - } - - values, ok := value.([]interface{}) - if !ok { - return fmt.Errorf("expected slice of values for key fields, got %T", value) - } - - if len(fields) != len(values) { - return fmt.Errorf("expected %d key fields, got %d values", len(fields), len(value.([]interface{}))) - } - for i, field := range fields { - if err := field.ValidateValue(values[i]); err != nil { - return err - } - } - return nil -} diff --git a/schema/fields_test.go b/schema/fields_test.go deleted file mode 100644 index befa968657..0000000000 --- a/schema/fields_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package schema - -import ( - "strings" - "testing" -) - -func TestValidateForKeyFields(t *testing.T) { - tests := []struct { - name string - keyFields []Field - key interface{} - errContains string - }{ - { - name: "no key fields", - keyFields: nil, - key: nil, - }, - { - name: "single key field, valid", - keyFields: object1Type.KeyFields, - key: "hello", - errContains: "", - }, - { - name: "single key field, invalid", - keyFields: object1Type.KeyFields, - key: []interface{}{"value"}, - errContains: "invalid value", - }, - { - name: "multiple key fields, valid", - keyFields: object2Type.KeyFields, - key: []interface{}{"hello", int32(42)}, - }, - { - name: "multiple key fields, not a slice", - keyFields: object2Type.KeyFields, - key: map[string]interface{}{"field1": "hello", "field2": "42"}, - errContains: "expected slice of values", - }, - { - name: "multiple key fields, wrong number of values", - keyFields: object2Type.KeyFields, - key: []interface{}{"hello"}, - errContains: "expected 2 key fields", - }, - { - name: "multiple key fields, invalid value", - keyFields: object2Type.KeyFields, - key: []interface{}{"hello", "abc"}, - errContains: "invalid value", - }, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - err := ValidateObjectKey(tt.keyFields, tt.key) - if tt.errContains == "" { - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } else { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err) - } - } - }) - } -} - -func TestValidateForValueFields(t *testing.T) { - tests := []struct { - name string - valueFields []Field - value interface{} - errContains string - }{ - { - name: "no value fields", - valueFields: nil, - value: nil, - }, - { - name: "single value field, valid", - valueFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - value: "hello", - errContains: "", - }, - { - name: "value updates, empty", - valueFields: object3Type.ValueFields, - value: MapValueUpdates(map[string]interface{}{}), - }, - { - name: "value updates, 1 field valid", - valueFields: object3Type.ValueFields, - value: MapValueUpdates(map[string]interface{}{ - "field1": "hello", - }), - }, - { - name: "value updates, 2 fields, 1 invalid", - valueFields: object3Type.ValueFields, - value: MapValueUpdates(map[string]interface{}{ - "field1": "hello", - "field2": "abc", - }), - errContains: "expected int32", - }, - { - name: "value updates, extra value", - valueFields: object3Type.ValueFields, - value: MapValueUpdates(map[string]interface{}{ - "field1": "hello", - "field2": int32(42), - "field3": "extra", - }), - errContains: "unexpected values", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateObjectValue(tt.valueFields, tt.value) - if tt.errContains == "" { - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } else { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err) - } - } - }) - } -} diff --git a/schema/go.mod b/schema/go.mod deleted file mode 100644 index 054862393f..0000000000 --- a/schema/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module cosmossdk.io/schema - -// NOTE: this go.mod should have zero dependencies and remain on go 1.12 to stay compatible -// with all known production releases of the Cosmos SDK. This is to ensure that all historical -// apps could be patched to support indexing if desired. - -go 1.12 diff --git a/schema/indexer/README.md b/schema/indexer/README.md deleted file mode 100644 index 9fdec6753a..0000000000 --- a/schema/indexer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Indexer Framework - -# Defining an Indexer - -Indexer implementations should be registered with the `indexer.Register` function with a unique type name. Indexers take the configuration options defined by `indexer.Config` which defines a common set of configuration options as well as indexer-specific options under the `config` sub-key. Indexers do not need to manage the common filtering options specified in `Config` - the indexer manager will manage these for the indexer. Indexer implementations just need to return a correct `InitResult` response. - -# Integrating the Indexer Manager - -The indexer manager should be used for managing all indexers and should be integrated directly with applications wishing to support indexing. The `StartManager` function is used to start the manager. The configuration options for the manager and all indexer targets should be passed as the ManagerOptions.Config field and should match the json structure of ManagerConfig. 
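
A sketch of that wiring from the application side might look like the following; note that `StartManager` is still a stub in the code below, so this shows only the intended API shape, and `appModules` and the config map are placeholders:

```go
package main

import (
    "cosmossdk.io/schema/decoding"
    "cosmossdk.io/schema/indexer"
)

func main() {
    // appModules stands in for the application's module set (name -> module).
    appModules := map[string]interface{}{}

    // The config map mirrors the parsed [indexer] section of app.toml.
    listener, err := indexer.StartManager(indexer.ManagerOptions{
        Config: map[string]interface{}{
            "target": map[string]interface{}{
                "postgres": map[string]interface{}{
                    "type":   "postgres",
                    "config": map[string]interface{}{"database_url": "postgres://..."},
                },
            },
        },
        Resolver: decoding.ModuleSetDecoderResolver(appModules),
    })
    if err != nil {
        panic(err)
    }
    _ = listener // the state machine writes all relevant app data to this listener
}
```
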
An example configuration section in `app.toml` might look like this:
-
-```toml
-[indexer.target.postgres]
-type = "postgres"
-config.database_url = "postgres://user:password@localhost:5432/dbname"
-```
diff --git a/schema/indexer/indexer.go b/schema/indexer/indexer.go
deleted file mode 100644
index ba3d0db704..0000000000
--- a/schema/indexer/indexer.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package indexer
-
-import (
-    "context"
-
-    "cosmossdk.io/schema/appdata"
-    "cosmossdk.io/schema/logutil"
-)
-
-// Config specifies the configuration passed to an indexer initialization function.
-// It includes both common configuration options related to including or excluding
-// parts of the data stream as well as indexer-specific options under the config
-// subsection.
-//
-// NOTE: it is an error for an indexer to change its common options, such as adding
-// or removing indexed modules, after the indexer has been initialized because this
-// could result in an inconsistent state.
-type Config struct {
-    // Type is the name of the indexer type as registered with Register.
-    Type string `json:"type"`
-
-    // Config are the indexer-specific config options specified by the user.
-    Config map[string]interface{} `json:"config"`
-
-    // ExcludeState specifies that the indexer will not receive state updates.
-    ExcludeState bool `json:"exclude_state"`
-
-    // ExcludeEvents specifies that the indexer will not receive events.
-    ExcludeEvents bool `json:"exclude_events"`
-
-    // ExcludeTxs specifies that the indexer will not receive transactions.
-    ExcludeTxs bool `json:"exclude_txs"`
-
-    // ExcludeBlockHeaders specifies that the indexer will not receive block headers,
-    // although it will still receive StartBlock and Commit callbacks, just without
-    // the header data.
-    ExcludeBlockHeaders bool `json:"exclude_block_headers"`
-
-    // IncludeModules specifies a list of modules whose state the indexer will
-    // receive state updates for.
-    // Only one of include or exclude modules should be specified.
-    IncludeModules []string `json:"include_modules"`
-
-    // ExcludeModules specifies a list of modules whose state the indexer will not
-    // receive state updates for.
-    // Only one of include or exclude modules should be specified.
-    ExcludeModules []string `json:"exclude_modules"`
-}
-
-type InitFunc = func(InitParams) (InitResult, error)
-
-// InitParams is the input to the indexer initialization function.
-type InitParams struct {
-    // Config is the indexer config.
-    Config Config
-
-    // Context is the context that the indexer should use to listen for a shutdown signal via Context.Done(). Other
-    // parameters may also be passed through context from the app if necessary.
-    Context context.Context
-
-    // Logger is a logger the indexer can use to write log messages.
-    Logger logutil.Logger
-}
-
-// InitResult is the indexer initialization result and includes the indexer's listener implementation.
-type InitResult struct {
-    // Listener is the indexer's app data listener.
-    Listener appdata.Listener
-
-    // LastBlockPersisted indicates the last block that the indexer persisted (if it is persisting data). It
-    // should be 0 if the indexer has no data stored and wants to start syncing state. It should be -1 if the indexer
-    // does not care to persist state at all and is just listening for some other streaming purpose. If the indexer
-    // has persisted state and has missed some blocks, a runtime error will occur to prevent the indexer from continuing
-    // in an invalid state.
If an indexer starts indexing after a chain's genesis (returning 0), the indexer manager - // will attempt to perform a catch-up sync of state. Historical events will not be replayed, but an accurate - // representation of the current state at the height at which indexing began can be reproduced. - LastBlockPersisted int64 -} diff --git a/schema/indexer/manager.go b/schema/indexer/manager.go deleted file mode 100644 index 5a7e39faad..0000000000 --- a/schema/indexer/manager.go +++ /dev/null @@ -1,44 +0,0 @@ -package indexer - -import ( - "context" - - "cosmossdk.io/schema/appdata" - "cosmossdk.io/schema/decoding" - "cosmossdk.io/schema/logutil" -) - -// ManagerOptions are the options for starting the indexer manager. -type ManagerOptions struct { - // Config is the user configuration for all indexing. It should generally be an instance of map[string]interface{} - // and match the json structure of ManagerConfig. The manager will attempt to convert it to ManagerConfig. - Config interface{} - - // Resolver is the decoder resolver that will be used to decode the data. It is required. - Resolver decoding.DecoderResolver - - // SyncSource is a representation of the current state of key-value data to be used in a catch-up sync. - // Catch-up syncs will be performed at initialization when necessary. SyncSource is optional but if - // it is omitted, indexers will only be able to start indexing state from genesis. - SyncSource decoding.SyncSource - - // Logger is the logger that indexers can use to write logs. It is optional. - Logger logutil.Logger - - // Context is the context that indexers should use for shutdown signals via Context.Done(). It can also - // be used to pass down other parameters to indexers if necessary. If it is omitted, context.Background - // will be used. - Context context.Context -} - -// ManagerConfig is the configuration of the indexer manager and contains the configuration for each indexer target. -type ManagerConfig struct { - // Target is a map of named indexer targets to their configuration. - Target map[string]Config -} - -// StartManager starts the indexer manager with the given options. The state machine should write all relevant app data to -// the returned listener. -func StartManager(opts ManagerOptions) (appdata.Listener, error) { - panic("TODO: this will be implemented in a follow-up PR, this function is just a stub to demonstrate the API") -} diff --git a/schema/indexer/registry.go b/schema/indexer/registry.go deleted file mode 100644 index 445f56876a..0000000000 --- a/schema/indexer/registry.go +++ /dev/null @@ -1,14 +0,0 @@ -package indexer - -import "fmt" - -// Register registers an indexer type with the given initialization function. 
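
A minimal sketch of registering a custom indexer against this API; the "noop" type name and the do-nothing listener are made up for illustration:

```go
package main

import (
    "cosmossdk.io/schema/appdata"
    "cosmossdk.io/schema/indexer"
)

func init() {
    // register a do-nothing indexer under a made-up type name
    indexer.Register("noop", func(params indexer.InitParams) (indexer.InitResult, error) {
        if params.Logger != nil {
            params.Logger.Info("noop indexer starting", "type", params.Config.Type)
        }
        return indexer.InitResult{
            Listener:           appdata.Listener{},
            LastBlockPersisted: -1, // purely streaming, no persisted state
        }, nil
    })
}

func main() {}
```
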
-func Register(indexerType string, initFunc InitFunc) {
-    if _, ok := indexerRegistry[indexerType]; ok {
-        panic(fmt.Sprintf("indexer %s already registered", indexerType))
-    }
-
-    indexerRegistry[indexerType] = initFunc
-}
-
-var indexerRegistry = map[string]InitFunc{}
diff --git a/schema/indexer/registry_test.go b/schema/indexer/registry_test.go
deleted file mode 100644
index b9f46910c8..0000000000
--- a/schema/indexer/registry_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package indexer
-
-import "testing"
-
-func TestRegister(t *testing.T) {
-    Register("test", func(params InitParams) (InitResult, error) {
-        return InitResult{}, nil
-    })
-
-    if indexerRegistry["test"] == nil {
-        t.Fatalf("expected to find indexer")
-    }
-
-    if indexerRegistry["test2"] != nil {
-        t.Fatalf("expected not to find indexer")
-    }
-
-    defer func() {
-        if r := recover(); r == nil {
-            t.Fatalf("expected to panic")
-        }
-    }()
-    Register("test", func(params InitParams) (InitResult, error) {
-        return InitResult{}, nil
-    })
-}
diff --git a/schema/kind.go b/schema/kind.go
deleted file mode 100644
index c28ce34af0..0000000000
--- a/schema/kind.go
+++ /dev/null
@@ -1,363 +0,0 @@
-package schema
-
-import (
-    "encoding/json"
-    "fmt"
-    "regexp"
-    "time"
-    "unicode/utf8"
-)
-
-// Kind represents the basic type of a field in an object.
-// Each kind defines the types of go values which should be accepted
-// by listeners and generated by decoders when providing entity updates.
-type Kind int
-
-const (
-    // InvalidKind indicates an invalid type.
-    InvalidKind Kind = iota
-
-    // StringKind is a string type and values of this type must be of the go type string
-    // containing valid UTF-8 and cannot contain null characters.
-    StringKind
-
-    // BytesKind is a bytes type and values of this type must be of the go type []byte.
-    BytesKind
-
-    // Int8Kind is an int8 type and values of this type must be of the go type int8.
-    Int8Kind
-
-    // Uint8Kind is a uint8 type and values of this type must be of the go type uint8.
-    Uint8Kind
-
-    // Int16Kind is an int16 type and values of this type must be of the go type int16.
-    Int16Kind
-
-    // Uint16Kind is a uint16 type and values of this type must be of the go type uint16.
-    Uint16Kind
-
-    // Int32Kind is an int32 type and values of this type must be of the go type int32.
-    Int32Kind
-
-    // Uint32Kind is a uint32 type and values of this type must be of the go type uint32.
-    Uint32Kind
-
-    // Int64Kind is an int64 type and values of this type must be of the go type int64.
-    Int64Kind
-
-    // Uint64Kind is a uint64 type and values of this type must be of the go type uint64.
-    Uint64Kind
-
-    // IntegerStringKind represents an arbitrary precision integer number. Values of this type must
-    // be of the go type string and formatted as base10 integers, specifically matching
-    // the IntegerFormat regex.
-    IntegerStringKind
-
-    // DecimalStringKind represents an arbitrary precision decimal or integer number. Values of this type
-    // must be of the go type string and match the DecimalFormat regex.
-    DecimalStringKind
-
-    // BoolKind is a boolean type and values of this type must be of the go type bool.
-    BoolKind
-
-    // TimeKind is a time type and values of this type must be of the go type time.Time.
-    TimeKind
-
-    // DurationKind is a duration type and values of this type must be of the go type time.Duration.
-    DurationKind
-
-    // Float32Kind is a float32 type and values of this type must be of the go type float32.
-    Float32Kind
-
-    // Float64Kind is a float64 type and values of this type must be of the go type float64.
-    Float64Kind
-
-    // AddressKind represents an account address and must be of type []byte. Addresses usually have a
-    // human-readable rendering, such as bech32, and tooling should provide a way for apps to define a
-    // string encoder for friendly user-facing display.
-    AddressKind
-
-    // EnumKind is an enum type and values of this type must be of the go type string.
-    // Fields of this type are expected to set the EnumDefinition field in the field definition to the enum
-    // definition.
-    EnumKind
-
-    // JSONKind is a JSON type and values of this type should be of go type json.RawMessage and represent
-    // valid JSON.
-    JSONKind
-)
-
-// MAX_VALID_KIND is the maximum valid kind value.
-const MAX_VALID_KIND = JSONKind
-
-const (
-    // IntegerFormat is a regex that describes the format integer number strings must match. It specifies
-    // that integers may have at most 100 digits.
-    IntegerFormat = `^-?[0-9]{1,100}$`
-
-    // DecimalFormat is a regex that describes the format decimal number strings must match. It specifies
-    // that decimals may have at most 50 digits before and after the decimal point and may have an optional
-    // exponent of up to 2 digits. These restrictions ensure that the decimal can be accurately represented
-    // by a wide variety of implementations.
-    DecimalFormat = `^-?[0-9]{1,50}(\.[0-9]{1,50})?([eE][-+]?[0-9]{1,2})?$`
-)
-
-// Validate returns an error if the kind is invalid.
-func (t Kind) Validate() error {
-    if t <= InvalidKind {
-        return fmt.Errorf("unknown type: %d", t)
-    }
-    if t > JSONKind {
-        return fmt.Errorf("invalid type: %d", t)
-    }
-    return nil
-}
-
-// String returns a string representation of the kind.
-func (t Kind) String() string {
-    switch t {
-    case StringKind:
-        return "string"
-    case BytesKind:
-        return "bytes"
-    case Int8Kind:
-        return "int8"
-    case Uint8Kind:
-        return "uint8"
-    case Int16Kind:
-        return "int16"
-    case Uint16Kind:
-        return "uint16"
-    case Int32Kind:
-        return "int32"
-    case Uint32Kind:
-        return "uint32"
-    case Int64Kind:
-        return "int64"
-    case Uint64Kind:
-        return "uint64"
-    case DecimalStringKind:
-        return "decimal"
-    case IntegerStringKind:
-        return "integer"
-    case BoolKind:
-        return "bool"
-    case TimeKind:
-        return "time"
-    case DurationKind:
-        return "duration"
-    case Float32Kind:
-        return "float32"
-    case Float64Kind:
-        return "float64"
-    case AddressKind:
-        return "bech32address"
-    case EnumKind:
-        return "enum"
-    case JSONKind:
-        return "json"
-    default:
-        return fmt.Sprintf("invalid(%d)", t)
-    }
-}
-
-// ValidateValueType returns an error if the value does not conform to the expected go type.
-// Some fields may accept nil values; however, this method does not have any notion of
-// nullability. This method only validates that the go type of the value is correct for the kind
-// and does not validate string or json formats. Kind.ValidateValue does a more thorough validation
-// of number and json string formatting.
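
For instance, a sketch of that split for IntegerStringKind (values are illustrative): any string passes the go-type check, while the stricter format check below rejects a non-numeric string:

```go
package main

import (
    "fmt"

    "cosmossdk.io/schema"
)

func main() {
    k := schema.IntegerStringKind
    fmt.Println(k.ValidateValueType("abc")) // <nil>: any string passes the go-type check
    fmt.Println(k.ValidateValueType(42))    // error: expected string, got int
    fmt.Println(k.ValidateValue("abc"))     // error: expected base10 integer
}
```
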
-func (t Kind) ValidateValueType(value interface{}) error {
-    switch t {
-    case StringKind:
-        _, ok := value.(string)
-        if !ok {
-            return fmt.Errorf("expected string, got %T", value)
-        }
-    case BytesKind:
-        _, ok := value.([]byte)
-        if !ok {
-            return fmt.Errorf("expected []byte, got %T", value)
-        }
-    case Int8Kind:
-        _, ok := value.(int8)
-        if !ok {
-            return fmt.Errorf("expected int8, got %T", value)
-        }
-    case Uint8Kind:
-        _, ok := value.(uint8)
-        if !ok {
-            return fmt.Errorf("expected uint8, got %T", value)
-        }
-    case Int16Kind:
-        _, ok := value.(int16)
-        if !ok {
-            return fmt.Errorf("expected int16, got %T", value)
-        }
-    case Uint16Kind:
-        _, ok := value.(uint16)
-        if !ok {
-            return fmt.Errorf("expected uint16, got %T", value)
-        }
-    case Int32Kind:
-        _, ok := value.(int32)
-        if !ok {
-            return fmt.Errorf("expected int32, got %T", value)
-        }
-    case Uint32Kind:
-        _, ok := value.(uint32)
-        if !ok {
-            return fmt.Errorf("expected uint32, got %T", value)
-        }
-    case Int64Kind:
-        _, ok := value.(int64)
-        if !ok {
-            return fmt.Errorf("expected int64, got %T", value)
-        }
-    case Uint64Kind:
-        _, ok := value.(uint64)
-        if !ok {
-            return fmt.Errorf("expected uint64, got %T", value)
-        }
-    case IntegerStringKind:
-        _, ok := value.(string)
-        if !ok {
-            return fmt.Errorf("expected string, got %T", value)
-        }
-
-    case DecimalStringKind:
-        _, ok := value.(string)
-        if !ok {
-            return fmt.Errorf("expected string, got %T", value)
-        }
-    case BoolKind:
-        _, ok := value.(bool)
-        if !ok {
-            return fmt.Errorf("expected bool, got %T", value)
-        }
-    case TimeKind:
-        _, ok := value.(time.Time)
-        if !ok {
-            return fmt.Errorf("expected time.Time, got %T", value)
-        }
-    case DurationKind:
-        _, ok := value.(time.Duration)
-        if !ok {
-            return fmt.Errorf("expected time.Duration, got %T", value)
-        }
-    case Float32Kind:
-        _, ok := value.(float32)
-        if !ok {
-            return fmt.Errorf("expected float32, got %T", value)
-        }
-    case Float64Kind:
-        _, ok := value.(float64)
-        if !ok {
-            return fmt.Errorf("expected float64, got %T", value)
-        }
-    case AddressKind:
-        _, ok := value.([]byte)
-        if !ok {
-            return fmt.Errorf("expected []byte, got %T", value)
-        }
-    case EnumKind:
-        _, ok := value.(string)
-        if !ok {
-            return fmt.Errorf("expected string, got %T", value)
-        }
-    case JSONKind:
-        _, ok := value.(json.RawMessage)
-        if !ok {
-            return fmt.Errorf("expected json.RawMessage, got %T", value)
-        }
-    default:
-        return fmt.Errorf("invalid type: %d", t)
-    }
-    return nil
-}
-
-// ValidateValue returns an error if the value does not conform to the expected go type and format.
-// It is more thorough, but slower, than Kind.ValidateValueType and validates that Integer, Decimal and JSON
-// values are formatted correctly. It cannot validate enum values because Kinds do not have enum schemas.
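
A short sketch of the format-level checks this adds for decimal and JSON kinds (example values are illustrative):

```go
package main

import (
    "encoding/json"
    "fmt"

    "cosmossdk.io/schema"
)

func main() {
    d := schema.DecimalStringKind
    fmt.Println(d.ValidateValue("1.5e-4")) // <nil>: matches DecimalFormat
    fmt.Println(d.ValidateValue("1E-999")) // error: exponents are limited to 2 digits

    j := schema.JSONKind
    fmt.Println(j.ValidateValue(json.RawMessage(`{"a":1}`))) // <nil>
    fmt.Println(j.ValidateValue(json.RawMessage(`{`)))       // error: invalid JSON
}
```
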
-func (t Kind) ValidateValue(value interface{}) error { - err := t.ValidateValueType(value) - if err != nil { - return err - } - - switch t { - case StringKind: - str := value.(string) - if !utf8.ValidString(str) { - return fmt.Errorf("expected valid utf-8 string, got %s", value) - } - - // check for null characters - for _, r := range str { - if r == 0 { - return fmt.Errorf("expected string without null characters, got %s", value) - } - } - case IntegerStringKind: - if !integerRegex.Match([]byte(value.(string))) { - return fmt.Errorf("expected base10 integer, got %s", value) - } - case DecimalStringKind: - if !decimalRegex.Match([]byte(value.(string))) { - return fmt.Errorf("expected decimal number, got %s", value) - } - case JSONKind: - if !json.Valid(value.(json.RawMessage)) { - return fmt.Errorf("expected valid JSON, got %s", value) - } - default: - return nil - } - return nil -} - -var ( - integerRegex = regexp.MustCompile(IntegerFormat) - decimalRegex = regexp.MustCompile(DecimalFormat) -) - -// KindForGoValue finds the simplest kind that can represent the given go value. It will not, however, -// return kinds such as IntegerStringKind, DecimalStringKind, AddressKind, or EnumKind which all can be -// represented as strings. -func KindForGoValue(value interface{}) Kind { - switch value.(type) { - case string: - return StringKind - case []byte: - return BytesKind - case int8: - return Int8Kind - case uint8: - return Uint8Kind - case int16: - return Int16Kind - case uint16: - return Uint16Kind - case int32: - return Int32Kind - case uint32: - return Uint32Kind - case int64: - return Int64Kind - case uint64: - return Uint64Kind - case float32: - return Float32Kind - case float64: - return Float64Kind - case bool: - return BoolKind - case time.Time: - return TimeKind - case time.Duration: - return DurationKind - case json.RawMessage: - return JSONKind - default: - return InvalidKind - } -} diff --git a/schema/kind_test.go b/schema/kind_test.go deleted file mode 100644 index 113762ec2e..0000000000 --- a/schema/kind_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - "testing" - "time" -) - -func TestKind_Validate(t *testing.T) { - for kind := InvalidKind + 1; kind <= MAX_VALID_KIND; kind++ { - if err := kind.Validate(); err != nil { - t.Errorf("expected valid kind %s to pass validation, got: %v", kind, err) - } - } - - invalidKinds := []Kind{ - Kind(-1), - InvalidKind, - Kind(100), - } - - for _, kind := range invalidKinds { - if err := kind.Validate(); err == nil { - t.Errorf("expected invalid kind %s to fail validation, got: %v", kind, err) - } - } -} - -func TestKind_ValidateValueType(t *testing.T) { - tests := []struct { - kind Kind - value interface{} - valid bool - }{ - {kind: StringKind, value: "hello", valid: true}, - {kind: StringKind, value: []byte("hello"), valid: false}, - {kind: BytesKind, value: []byte("hello"), valid: true}, - {kind: BytesKind, value: "hello", valid: false}, - {kind: Int8Kind, value: int8(1), valid: true}, - {kind: Int8Kind, value: int16(1), valid: false}, - {kind: Uint8Kind, value: uint8(1), valid: true}, - {kind: Uint8Kind, value: uint16(1), valid: false}, - {kind: Int16Kind, value: int16(1), valid: true}, - {kind: Int16Kind, value: int32(1), valid: false}, - {kind: Uint16Kind, value: uint16(1), valid: true}, - {kind: Uint16Kind, value: uint32(1), valid: false}, - {kind: Int32Kind, value: int32(1), valid: true}, - {kind: Int32Kind, value: int64(1), valid: false}, - {kind: Uint32Kind, value: uint32(1), valid: 
true}, - {kind: Uint32Kind, value: uint64(1), valid: false}, - {kind: Int64Kind, value: int64(1), valid: true}, - {kind: Int64Kind, value: int32(1), valid: false}, - {kind: Uint64Kind, value: uint64(1), valid: true}, - {kind: Uint64Kind, value: uint32(1), valid: false}, - {kind: IntegerStringKind, value: "1", valid: true}, - {kind: IntegerStringKind, value: int32(1), valid: false}, - {kind: DecimalStringKind, value: "1.0", valid: true}, - {kind: DecimalStringKind, value: "1", valid: true}, - {kind: DecimalStringKind, value: "1.1e4", valid: true}, - {kind: DecimalStringKind, value: int32(1), valid: false}, - {kind: AddressKind, value: []byte("hello"), valid: true}, - {kind: AddressKind, value: 1, valid: false}, - {kind: BoolKind, value: true, valid: true}, - {kind: BoolKind, value: false, valid: true}, - {kind: BoolKind, value: 1, valid: false}, - {kind: EnumKind, value: "hello", valid: true}, - {kind: EnumKind, value: 1, valid: false}, - {kind: TimeKind, value: time.Now(), valid: true}, - {kind: TimeKind, value: "hello", valid: false}, - {kind: DurationKind, value: time.Second, valid: true}, - {kind: DurationKind, value: "hello", valid: false}, - {kind: Float32Kind, value: float32(1.0), valid: true}, - {kind: Float32Kind, value: float64(1.0), valid: false}, - {kind: Float64Kind, value: float64(1.0), valid: true}, - {kind: Float64Kind, value: float32(1.0), valid: false}, - {kind: JSONKind, value: json.RawMessage("{}"), valid: true}, - {kind: JSONKind, value: "hello", valid: false}, - {kind: InvalidKind, value: "hello", valid: false}, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - err := tt.kind.ValidateValueType(tt.value) - if tt.valid && err != nil { - t.Errorf("test %d: expected valid value %v for kind %s to pass validation, got: %v", i, tt.value, tt.kind, err) - } - if !tt.valid && err == nil { - t.Errorf("test %d: expected invalid value %v for kind %s to fail validation, got: %v", i, tt.value, tt.kind, err) - } - }) - } - - // nils get rejected - for kind := InvalidKind + 1; kind <= MAX_VALID_KIND; kind++ { - if err := kind.ValidateValueType(nil); err == nil { - t.Errorf("expected nil value to fail validation for kind %s", kind) - } - } -} - -func TestKind_ValidateValue(t *testing.T) { - tests := []struct { - kind Kind - value interface{} - valid bool - }{ - // check a few basic cases that should get caught be ValidateValueType - {StringKind, "hello", true}, - {Int64Kind, int64(1), true}, - {Int32Kind, "abc", false}, - {BytesKind, nil, false}, - // string must be valid UTF-8 - {StringKind, string([]byte{0xff, 0xfe, 0xfd}), false}, - // strings with null characters are invalid - {StringKind, string([]byte{1, 2, 0, 3}), false}, - // check integer, decimal and json more thoroughly - {IntegerStringKind, "1", true}, - {IntegerStringKind, "0", true}, - {IntegerStringKind, "10", true}, - {IntegerStringKind, "-100", true}, - {IntegerStringKind, "1.0", false}, - {IntegerStringKind, "00", true}, // leading zeros are allowed - {IntegerStringKind, "001", true}, - {IntegerStringKind, "-01", true}, - // 100 digits - {IntegerStringKind, "1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", true}, - // more than 100 digits - {IntegerStringKind, "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", false}, - {IntegerStringKind, "", false}, - {IntegerStringKind, "abc", false}, - {IntegerStringKind, "abc100", false}, - {DecimalStringKind, "1.0", 
true}, - {DecimalStringKind, "0.0", true}, - {DecimalStringKind, "-100.075", true}, - {DecimalStringKind, "1002346.000", true}, - {DecimalStringKind, "0", true}, - {DecimalStringKind, "10", true}, - {DecimalStringKind, "-100", true}, - {DecimalStringKind, "1", true}, - {DecimalStringKind, "1.0e4", true}, - {DecimalStringKind, "1.0e-4", true}, - {DecimalStringKind, "1.0e+4", true}, - {DecimalStringKind, "1.0e", false}, - {DecimalStringKind, "1.0e4.0", false}, - {DecimalStringKind, "1.0e-4.0", false}, - {DecimalStringKind, "1.0e+4.0", false}, - {DecimalStringKind, "-1.0e-4", true}, - {DecimalStringKind, "-1.0e+4", true}, - {DecimalStringKind, "-1.0E4", true}, - {DecimalStringKind, "1E-9", true}, - {DecimalStringKind, "1E-99", true}, - {DecimalStringKind, "1E+9", true}, - {DecimalStringKind, "1E+99", true}, - // 50 digits before and after the decimal point - {DecimalStringKind, "10000000000000000000000000000000000000000000000000.10000000000000000000000000000000000000000000000001", true}, - // too many digits before the decimal point - {DecimalStringKind, "10000000000000000000000000000000000000000000000000000000000000000000000000", false}, - // too many digits after the decimal point - {DecimalStringKind, "1.0000000000000000000000000000000000000000000000000000000000000000000000001", false}, - // exponent too big - {DecimalStringKind, "1E-999", false}, - {DecimalStringKind, "", false}, - {DecimalStringKind, "abc", false}, - {DecimalStringKind, "abc", false}, - {JSONKind, json.RawMessage(`{"a":10}`), true}, - {JSONKind, json.RawMessage("10"), true}, - {JSONKind, json.RawMessage("10.0"), true}, - {JSONKind, json.RawMessage("true"), true}, - {JSONKind, json.RawMessage("null"), true}, - {JSONKind, json.RawMessage(`"abc"`), true}, - {JSONKind, json.RawMessage(`[1,true,0.1,"abc",{"b":3}]`), true}, - {JSONKind, json.RawMessage(`"abc`), false}, - {JSONKind, json.RawMessage(`tru`), false}, - {JSONKind, json.RawMessage(`[`), false}, - {JSONKind, json.RawMessage(`{`), false}, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("test %v %s", tt.kind, tt.value), func(t *testing.T) { - err := tt.kind.ValidateValue(tt.value) - if tt.valid && err != nil { - t.Errorf("test %d: expected valid value %v for kind %s to pass validation, got: %v", i, tt.value, tt.kind, err) - } - if !tt.valid && err == nil { - t.Errorf("test %d: expected invalid value %v for kind %s to fail validation, got: %v", i, tt.value, tt.kind, err) - } - }) - } -} - -func TestKind_String(t *testing.T) { - tests := []struct { - kind Kind - want string - }{ - {StringKind, "string"}, - {BytesKind, "bytes"}, - {Int8Kind, "int8"}, - {Uint8Kind, "uint8"}, - {Int16Kind, "int16"}, - {Uint16Kind, "uint16"}, - {Int32Kind, "int32"}, - {Uint32Kind, "uint32"}, - {Int64Kind, "int64"}, - {Uint64Kind, "uint64"}, - {IntegerStringKind, "integer"}, - {DecimalStringKind, "decimal"}, - {BoolKind, "bool"}, - {TimeKind, "time"}, - {DurationKind, "duration"}, - {Float32Kind, "float32"}, - {Float64Kind, "float64"}, - {JSONKind, "json"}, - {EnumKind, "enum"}, - {AddressKind, "bech32address"}, - {InvalidKind, "invalid(0)"}, - } - for i, tt := range tests { - t.Run(fmt.Sprintf("test %s", tt.kind), func(t *testing.T) { - if got := tt.kind.String(); got != tt.want { - t.Errorf("test %d: Kind.String() = %v, want %v", i, got, tt.want) - } - }) - } -} - -func TestKindForGoValue(t *testing.T) { - tests := []struct { - value interface{} - want Kind - }{ - {"hello", StringKind}, - {[]byte("hello"), BytesKind}, - {int8(1), Int8Kind}, - {uint8(1), Uint8Kind}, - {int16(1), 
Int16Kind}, - {uint16(1), Uint16Kind}, - {int32(1), Int32Kind}, - {uint32(1), Uint32Kind}, - {int64(1), Int64Kind}, - {uint64(1), Uint64Kind}, - {float32(1.0), Float32Kind}, - {float64(1.0), Float64Kind}, - {true, BoolKind}, - {time.Now(), TimeKind}, - {time.Second, DurationKind}, - {json.RawMessage("{}"), JSONKind}, - {map[string]interface{}{"a": 1}, InvalidKind}, - } - for i, tt := range tests { - t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { - if got := KindForGoValue(tt.value); got != tt.want { - t.Errorf("test %d: KindForGoValue(%v) = %v, want %v", i, tt.value, got, tt.want) - } - - // for valid kinds check valid value - if tt.want.Validate() == nil { - if err := tt.want.ValidateValue(tt.value); err != nil { - t.Errorf("test %d: expected valid value %v for kind %s to pass validation, got: %v", i, tt.value, tt.want, err) - } - } - }) - } -} diff --git a/schema/logutil/logger.go b/schema/logutil/logger.go deleted file mode 100644 index cb6b34ebfd..0000000000 --- a/schema/logutil/logger.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package logutil defines the Logger interface expected by indexer implementations. -// It is implemented by cosmossdk.io/log which is not imported to minimize dependencies. -package logutil - -// Logger is the logger interface expected by indexer implementations. -type Logger interface { - // Info takes a message and a set of key/value pairs and logs with level INFO. - // The key of the tuple must be a string. - Info(msg string, keyVals ...interface{}) - - // Warn takes a message and a set of key/value pairs and logs with level WARN. - // The key of the tuple must be a string. - Warn(msg string, keyVals ...interface{}) - - // Error takes a message and a set of key/value pairs and logs with level ERR. - // The key of the tuple must be a string. - Error(msg string, keyVals ...interface{}) - - // Debug takes a message and a set of key/value pairs and logs with level DEBUG. - // The key of the tuple must be a string. - Debug(msg string, keyVals ...interface{}) -} - -// NoopLogger is a logger that doesn't do anything. -type NoopLogger struct{} - -func (n NoopLogger) Info(string, ...interface{}) {} - -func (n NoopLogger) Warn(string, ...interface{}) {} - -func (n NoopLogger) Error(string, ...interface{}) {} - -func (n NoopLogger) Debug(string, ...interface{}) {} - -var _ Logger = NoopLogger{} diff --git a/schema/module_schema.go b/schema/module_schema.go deleted file mode 100644 index 9412c4456c..0000000000 --- a/schema/module_schema.go +++ /dev/null @@ -1,31 +0,0 @@ -package schema - -import "fmt" - -// ModuleSchema represents the logical schema of a module for purposes of indexing and querying. -type ModuleSchema struct { - // ObjectTypes describe the types of objects that are part of the module's schema. - ObjectTypes []ObjectType -} - -// Validate validates the module schema. -func (s ModuleSchema) Validate() error { - enumValueMap := map[string]map[string]bool{} - for _, objType := range s.ObjectTypes { - if err := objType.validate(enumValueMap); err != nil { - return err - } - } - - return nil -} - -// ValidateObjectUpdate validates that the update conforms to the module schema. 
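
A minimal sketch of module-level update validation against these definitions; the "balances" schema here is illustrative:

```go
package main

import (
    "fmt"

    "cosmossdk.io/schema"
)

func main() {
    modSchema := schema.ModuleSchema{
        ObjectTypes: []schema.ObjectType{{
            Name:        "balances",
            KeyFields:   []schema.Field{{Name: "account", Kind: schema.StringKind}},
            ValueFields: []schema.Field{{Name: "amount", Kind: schema.Uint64Kind}},
        }},
    }

    ok := schema.ObjectUpdate{TypeName: "balances", Key: "alice", Value: uint64(10)}
    bad := schema.ObjectUpdate{TypeName: "supply", Key: "foo"}

    fmt.Println(modSchema.ValidateObjectUpdate(ok))  // <nil>
    fmt.Println(modSchema.ValidateObjectUpdate(bad)) // error: object type "supply" not found
}
```
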
-func (s ModuleSchema) ValidateObjectUpdate(update ObjectUpdate) error { - for _, objType := range s.ObjectTypes { - if objType.Name == update.TypeName { - return objType.ValidateObjectUpdate(update) - } - } - return fmt.Errorf("object type %q not found in module schema", update.TypeName) -} diff --git a/schema/module_schema_test.go b/schema/module_schema_test.go deleted file mode 100644 index d04327811b..0000000000 --- a/schema/module_schema_test.go +++ /dev/null @@ -1,229 +0,0 @@ -package schema - -import ( - "strings" - "testing" -) - -func TestModuleSchema_Validate(t *testing.T) { - tests := []struct { - name string - moduleSchema ModuleSchema - errContains string - }{ - { - name: "valid module schema", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "object1", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - }, - }, - errContains: "", - }, - { - name: "invalid object type", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - }, - }, - errContains: "invalid object type name", - }, - { - name: "same enum with missing values", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "object1", - KeyFields: []Field{ - { - Name: "k", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b"}, - }, - }, - }, - ValueFields: []Field{ - { - Name: "v", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b", "c"}, - }, - }, - }, - }, - }, - }, - errContains: "different number of values", - }, - { - name: "same enum with different values", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "object1", - KeyFields: []Field{ - { - Name: "k", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b"}, - }, - }, - }, - }, - { - Name: "object2", - KeyFields: []Field{ - { - Name: "k", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "c"}, - }, - }, - }, - }, - }, - }, - errContains: "different values", - }, - { - name: "same enum", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "object1", - KeyFields: []Field{ - { - Name: "k", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b"}, - }, - }, - }, - }, - { - Name: "object2", - KeyFields: []Field{ - { - Name: "k", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b"}, - }, - }, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.moduleSchema.Validate() - if tt.errContains == "" { - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } else { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err) - } - } - }) - } -} - -func TestModuleSchema_ValidateObjectUpdate(t *testing.T) { - tests := []struct { - name string - moduleSchema ModuleSchema - objectUpdate ObjectUpdate - errContains string - }{ - { - name: "valid object update", - moduleSchema: ModuleSchema{ - ObjectTypes: []ObjectType{ - { - Name: "object1", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - }, - }, - objectUpdate: ObjectUpdate{ - TypeName: "object1", - Key: "abc", - }, - errContains: "", - }, - { - name: "object type not found", - moduleSchema: 
ModuleSchema{
-                ObjectTypes: []ObjectType{
-                    {
-                        Name: "object1",
-                        KeyFields: []Field{
-                            {
-                                Name: "field1",
-                                Kind: StringKind,
-                            },
-                        },
-                    },
-                },
-            },
-            objectUpdate: ObjectUpdate{
-                TypeName: "object2",
-                Key:      "abc",
-            },
-            errContains: "object type \"object2\" not found in module schema",
-        },
-    }
-
-    for _, tt := range tests {
-        t.Run(tt.name, func(t *testing.T) {
-            err := tt.moduleSchema.ValidateObjectUpdate(tt.objectUpdate)
-            if tt.errContains == "" {
-                if err != nil {
-                    t.Fatalf("unexpected error: %v", err)
-                }
-            } else {
-                if err == nil || !strings.Contains(err.Error(), tt.errContains) {
-                    t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err)
-                }
-            }
-        })
-    }
-}
diff --git a/schema/name.go b/schema/name.go
deleted file mode 100644
index fc384fec66..0000000000
--- a/schema/name.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package schema
-
-import "regexp"
-
-// NameFormat is the regular expression that a name must match.
-// A name must start with a letter or underscore and can only contain letters, numbers, and underscores.
-// A name must be at least one character long and can be at most 64 characters long.
-const NameFormat = `^[a-zA-Z_][a-zA-Z0-9_]{0,63}$`
-
-var nameRegex = regexp.MustCompile(NameFormat)
-
-// ValidateName checks if the given name is a valid name conforming to NameFormat.
-func ValidateName(name string) bool {
-    return nameRegex.MatchString(name)
-}
diff --git a/schema/name_test.go b/schema/name_test.go
deleted file mode 100644
index 2383b880db..0000000000
--- a/schema/name_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package schema
-
-import "testing"
-
-func TestValidateName(t *testing.T) {
-    tests := []struct {
-        name  string
-        valid bool
-    }{
-        {"", false},
-        {"a", true},
-        {"A", true},
-        {"_", true},
-        {"abc123_def789", true},
-        {"0", false},
-        {"a0", true},
-        {"a_", true},
-        {"$a", false},
-        {"a b", false},
-        {"pretty_unnecessarily_long_but_valid_name", true},
-        {"totally_unnecessarily_long_and_invalid_name_sdgkhwersdglkhweriqwery3258", false},
-    }
-
-    for _, test := range tests {
-        t.Run(test.name, func(t *testing.T) {
-            if ValidateName(test.name) != test.valid {
-                t.Errorf("expected %v for name %q", test.valid, test.name)
-            }
-        })
-    }
-}
diff --git a/schema/object_type.go b/schema/object_type.go
deleted file mode 100644
index a8fa432d80..0000000000
--- a/schema/object_type.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package schema
-
-import "fmt"
-
-// ObjectType describes an object type in a module schema.
-type ObjectType struct {
-    // Name is the name of the object type. It must be unique within the module schema
-    // and conform to the NameFormat regular expression.
-    Name string
-
-    // KeyFields is a list of fields that make up the primary key of the object.
-    // It can be empty in which case indexers should assume that this object is
-    // a singleton and only has one value. Field names must be unique within the
-    // object between both key and value fields. Key fields CANNOT be nullable.
-    KeyFields []Field
-
-    // ValueFields is a list of fields that are not part of the primary key of the object.
-    // It can be empty in the case where all fields are part of the primary key.
-    // Field names must be unique within the object between both key and value fields.
-    ValueFields []Field
-
-    // RetainDeletions is a flag that indicates whether the indexer should retain
-    // deleted rows in the database and flag them as deleted rather than actually
-    // deleting the row.
For many types of data in state, the data is deleted even - // though it is still valid in order to save space. Indexers will want to have - // the option of retaining such data and distinguishing from other "true" deletions. - RetainDeletions bool -} - -// Validate validates the object type. -func (o ObjectType) Validate() error { - return o.validate(map[string]map[string]bool{}) -} - -// validate validates the object type with an enumValueMap that can be -// shared across a whole module schema. -func (o ObjectType) validate(enumValueMap map[string]map[string]bool) error { - if !ValidateName(o.Name) { - return fmt.Errorf("invalid object type name %q", o.Name) - } - - fieldNames := map[string]bool{} - - for _, field := range o.KeyFields { - if err := field.Validate(); err != nil { - return fmt.Errorf("invalid key field %q: %v", field.Name, err) //nolint:errorlint // false positive due to using go1.12 - } - - if field.Nullable { - return fmt.Errorf("key field %q cannot be nullable", field.Name) - } - - if fieldNames[field.Name] { - return fmt.Errorf("duplicate field name %q", field.Name) - } - fieldNames[field.Name] = true - - if err := checkEnumCompatibility(enumValueMap, field); err != nil { - return err - } - } - - for _, field := range o.ValueFields { - if err := field.Validate(); err != nil { - return fmt.Errorf("invalid value field %q: %v", field.Name, err) //nolint:errorlint // false positive due to using go1.12 - } - - if fieldNames[field.Name] { - return fmt.Errorf("duplicate field name %q", field.Name) - } - fieldNames[field.Name] = true - - if err := checkEnumCompatibility(enumValueMap, field); err != nil { - return err - } - } - - if len(o.KeyFields) == 0 && len(o.ValueFields) == 0 { - return fmt.Errorf("object type %q has no key or value fields", o.Name) - } - - return nil -} - -// ValidateObjectUpdate validates that the update conforms to the object type. 
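-// For example (illustrative): for an object type whose only key field is a
-// StringKind named "field1", ObjectUpdate{TypeName: "object1", Key: "abc"}
-// validates, while Key: 123 fails because the key value is not a string.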
-func (o ObjectType) ValidateObjectUpdate(update ObjectUpdate) error { - if o.Name != update.TypeName { - return fmt.Errorf("object type name %q does not match update type name %q", o.Name, update.TypeName) - } - - if err := ValidateObjectKey(o.KeyFields, update.Key); err != nil { - return fmt.Errorf("invalid key for object type %q: %v", update.TypeName, err) //nolint:errorlint // false positive due to using go1.12 - } - - if update.Delete { - return nil - } - - return ValidateObjectValue(o.ValueFields, update.Value) -} diff --git a/schema/object_type_test.go b/schema/object_type_test.go deleted file mode 100644 index 0a78a371aa..0000000000 --- a/schema/object_type_test.go +++ /dev/null @@ -1,270 +0,0 @@ -package schema - -import ( - "strings" - "testing" -) - -var object1Type = ObjectType{ - Name: "object1", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, -} - -var object2Type = ObjectType{ - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - { - Name: "field2", - Kind: Int32Kind, - }, - }, -} - -var object3Type = ObjectType{ - Name: "object3", - ValueFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - { - Name: "field2", - Kind: Int32Kind, - }, - }, -} - -var object4Type = ObjectType{ - Name: "object4", - KeyFields: []Field{ - { - Name: "field1", - Kind: Int32Kind, - }, - }, - ValueFields: []Field{ - { - Name: "field2", - Kind: StringKind, - }, - }, -} - -func TestObjectType_Validate(t *testing.T) { - tests := []struct { - name string - objectType ObjectType - errContains string - }{ - { - name: "valid object type", - objectType: object1Type, - errContains: "", - }, - { - name: "empty object type name", - objectType: ObjectType{ - Name: "", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - errContains: "invalid object type name", - }, - { - name: "invalid key field", - objectType: ObjectType{ - Name: "object1", - KeyFields: []Field{ - { - Name: "", - Kind: StringKind, - }, - }, - }, - errContains: "invalid field name", - }, - { - name: "invalid value field", - objectType: ObjectType{ - Name: "object1", - ValueFields: []Field{ - { - Kind: StringKind, - }, - }, - }, - errContains: "invalid field name", - }, - { - name: "no fields", - objectType: ObjectType{Name: "object0"}, - errContains: "has no key or value fields", - }, - { - name: "duplicate field", - objectType: ObjectType{ - Name: "object1", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - ValueFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - errContains: "duplicate field name", - }, - { - name: "duplicate field 22", - objectType: ObjectType{ - Name: "object1", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - }, - { - Name: "field1", - Kind: StringKind, - }, - }, - }, - errContains: "duplicate field name", - }, - { - name: "nullable key field", - objectType: ObjectType{ - Name: "objectNullKey", - KeyFields: []Field{ - { - Name: "field1", - Kind: StringKind, - Nullable: true, - }, - }, - }, - errContains: "key field \"field1\" cannot be nullable", - }, - { - name: "duplicate incompatible enum", - objectType: ObjectType{ - Name: "objectWithEnums", - KeyFields: []Field{ - { - Name: "key", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"a", "b"}, - }, - }, - }, - ValueFields: []Field{ - { - Name: "value", - Kind: EnumKind, - EnumDefinition: EnumDefinition{ - Name: "enum1", - Values: []string{"c", "b"}, - }, - }, - }, - }, - errContains: 
"enum \"enum1\" has different values", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.objectType.Validate() - if tt.errContains == "" { - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } else { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err) - } - } - }) - } -} - -func TestObjectType_ValidateObjectUpdate(t *testing.T) { - tests := []struct { - name string - objectType ObjectType - object ObjectUpdate - errContains string - }{ - { - name: "wrong name", - objectType: object1Type, - object: ObjectUpdate{ - TypeName: "object2", - Key: "hello", - }, - errContains: "does not match update type name", - }, - { - name: "invalid value", - objectType: object1Type, - object: ObjectUpdate{ - TypeName: "object1", - Key: 123, - }, - errContains: "invalid value", - }, - { - name: "valid update", - objectType: object4Type, - object: ObjectUpdate{ - TypeName: "object4", - Key: int32(123), - Value: "hello", - }, - }, - { - name: "valid deletion", - objectType: object4Type, - object: ObjectUpdate{ - TypeName: "object4", - Key: int32(123), - Value: "ignored!", - Delete: true, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.objectType.ValidateObjectUpdate(tt.object) - if tt.errContains == "" { - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } else { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Fatalf("expected error to contain %q, got: %v", tt.errContains, err) - } - } - }) - } -} diff --git a/schema/object_update.go b/schema/object_update.go deleted file mode 100644 index 455c4a850a..0000000000 --- a/schema/object_update.go +++ /dev/null @@ -1,61 +0,0 @@ -package schema - -import "sort" - -// ObjectUpdate represents an update operation on an object in a module's state. -type ObjectUpdate struct { - // TypeName is the name of the object type in the module's schema. - TypeName string - - // Key returns the value of the primary key of the object and must conform to these constraints with respect - // that the schema that is defined for the object: - // - if key represents a single field, then the value must be valid for the first field in that - // field list. For instance, if there is one field in the key of type String, then the value must be of - // type string - // - if key represents multiple fields, then the value must be a slice of values where each value is valid - // for the corresponding field in the field list. For instance, if there are two fields in the key of - // type String, String, then the value must be a slice of two strings. - // If the key has no fields, meaning that this is a singleton object, then this value is ignored and can be nil. - Key interface{} - - // Value returns the non-primary key fields of the object and can either conform to the same constraints - // as ObjectUpdate.Key or it may be and instance of ValueUpdates. ValueUpdates can be used as a performance - // optimization to avoid copying the values of the object into the update and/or to omit unchanged fields. - // If this is a delete operation, then this value is ignored and can be nil. - Value interface{} - - // Delete is a flag that indicates whether this update is a delete operation. If true, then the Value field - // is ignored and can be nil. - Delete bool -} - -// ValueUpdates is an interface that represents the value fields of an object update. 
fields that -// were not updated may be excluded from the update. Consumers should be aware that implementations -// may not filter out fields that were unchanged. However, if a field is omitted from the update -// it should be considered unchanged. -type ValueUpdates interface { - // Iterate iterates over the fields and values in the object update. The function should return - // true to continue iteration or false to stop iteration. Each field value should conform - // to the requirements of that field's type in the schema. Iterate returns an error if - // it was unable to decode the values properly (which could be the case in lazy evaluation). - Iterate(func(col string, value interface{}) bool) error -} - -// MapValueUpdates is a map-based implementation of ValueUpdates which always iterates -// over keys in sorted order. -type MapValueUpdates map[string]interface{} - -// Iterate implements the ValueUpdates interface. -func (m MapValueUpdates) Iterate(fn func(col string, value interface{}) bool) error { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - if !fn(k, m[k]) { - return nil - } - } - return nil -} diff --git a/schema/object_update_test.go b/schema/object_update_test.go deleted file mode 100644 index eb5a156f18..0000000000 --- a/schema/object_update_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package schema - -import "testing" - -func TestMapValueUpdates_Iterate(t *testing.T) { - updates := MapValueUpdates(map[string]interface{}{ - "a": "abc", - "b": 123, - }) - - got := map[string]interface{}{} - err := updates.Iterate(func(fieldname string, value interface{}) bool { - got[fieldname] = value - return true - }) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(got) != 2 { - t.Errorf("expected 2 updates, got: %v", got) - } - - if got["a"] != "abc" { - t.Errorf("expected a=abc, got: %v", got) - } - - if got["b"] != 123 { - t.Errorf("expected b=123, got: %v", got) - } - - got = map[string]interface{}{} - err = updates.Iterate(func(fieldname string, value interface{}) bool { - if len(got) == 1 { - return false - } - got[fieldname] = value - return true - }) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(got) != 1 { - t.Errorf("expected 1 updates, got: %v", got) - } - - // should have gotten the first field in order - if got["a"] != "abc" { - t.Errorf("expected a=abc, got: %v", got) - } -} diff --git a/schema/sonar-project.properties b/schema/sonar-project.properties deleted file mode 100644 index 6839d8a914..0000000000 --- a/schema/sonar-project.properties +++ /dev/null @@ -1,16 +0,0 @@ -sonar.projectKey=cosmos-sdk-schema -sonar.organization=cosmos - -sonar.projectName=Cosmos SDK - Schema -sonar.project.monorepo.enabled=true - -sonar.sources=. -sonar.exclusions=**/*_test.go,**/*.pb.go,**/*.pulsar.go,**/*.pb.gw.go -sonar.coverage.exclusions=**/*_test.go,**/testutil/**,**/*.pb.go,**/*.pb.gw.go,**/*.pulsar.go,test_helpers.go,docs/** -sonar.tests=. 
-sonar.test.inclusions=**/*_test.go -sonar.go.coverage.reportPaths=coverage.out - -sonar.sourceEncoding=UTF-8 -sonar.scm.provider=git -sonar.scm.forceReloadAll=true diff --git a/server/v2/api/grpc/codec.go b/server/v2/api/grpc/codec.go deleted file mode 100644 index d0d885c041..0000000000 --- a/server/v2/api/grpc/codec.go +++ /dev/null @@ -1,95 +0,0 @@ -package grpc - -import ( - "errors" - "fmt" - - gogoproto "github.com/cosmos/gogoproto/proto" - "google.golang.org/grpc/encoding" - "google.golang.org/protobuf/proto" - - _ "cosmossdk.io/api/amino" // Import amino.proto file for reflection - appmanager "cosmossdk.io/core/app" -) - -type protoCodec struct { - interfaceRegistry appmanager.InterfaceRegistry -} - -// newProtoCodec returns a reference to a new ProtoCodec -func newProtoCodec(interfaceRegistry appmanager.InterfaceRegistry) *protoCodec { - return &protoCodec{ - interfaceRegistry: interfaceRegistry, - } -} - -// Marshal implements BinaryMarshaler.Marshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. For interface please use the codec.MarshalInterface -func (pc *protoCodec) Marshal(o gogoproto.Message) ([]byte, error) { - // Size() check can catch the typed nil value. - if o == nil || gogoproto.Size(o) == 0 { - // return empty bytes instead of nil, because nil has special meaning in places like store.Set - return []byte{}, nil - } - - return gogoproto.Marshal(o) -} - -// Unmarshal implements BinaryMarshaler.Unmarshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. For interface please use the codec.UnmarshalInterface -func (pc *protoCodec) Unmarshal(bz []byte, ptr gogoproto.Message) error { - err := gogoproto.Unmarshal(bz, ptr) - if err != nil { - return err - } - // err = codectypes.UnpackInterfaces(ptr, pc.interfaceRegistry) // TODO: identify if needed for grpc - // if err != nil { - // return err - // } - return nil -} - -func (pc *protoCodec) Name() string { - return "cosmos-sdk-grpc-codec" -} - -// GRPCCodec returns the gRPC Codec for this specific ProtoCodec -func (pc *protoCodec) GRPCCodec() encoding.Codec { - return &grpcProtoCodec{cdc: pc} -} - -// grpcProtoCodec is the implementation of the gRPC proto codec. -type grpcProtoCodec struct { - cdc appmanager.ProtoCodec -} - -var errUnknownProtoType = errors.New("codec: unknown proto type") // sentinel error - -func (c grpcProtoCodec) Marshal(v any) ([]byte, error) { - switch m := v.(type) { - case proto.Message: - protov2MarshalOpts := proto.MarshalOptions{Deterministic: true} - return protov2MarshalOpts.Marshal(m) - case gogoproto.Message: - return c.cdc.Marshal(m) - default: - return nil, fmt.Errorf("%w: cannot marshal type %T", errUnknownProtoType, v) - } -} - -func (c grpcProtoCodec) Unmarshal(data []byte, v any) error { - switch m := v.(type) { - case proto.Message: - return proto.Unmarshal(data, m) - case gogoproto.Message: - return c.cdc.Unmarshal(data, m) - default: - return fmt.Errorf("%w: cannot unmarshal type %T", errUnknownProtoType, v) - } -} - -func (c grpcProtoCodec) Name() string { - return "cosmos-sdk-grpc-codec" -} diff --git a/server/v2/api/grpc/config.go b/server/v2/api/grpc/config.go deleted file mode 100644 index 86fb514e70..0000000000 --- a/server/v2/api/grpc/config.go +++ /dev/null @@ -1,51 +0,0 @@ -package grpc - -import "math" - -func DefaultConfig() *Config { - return &Config{ - Enable: true, - // DefaultGRPCAddress defines the default address to bind the gRPC server to. 
- Address: "localhost:9090", - // DefaultGRPCMaxRecvMsgSize defines the default gRPC max message size in - // bytes the server can receive. - MaxRecvMsgSize: 1024 * 1024 * 10, - // DefaultGRPCMaxSendMsgSize defines the default gRPC max message size in - // bytes the server can send. - MaxSendMsgSize: math.MaxInt32, - } -} - -// Config defines configuration for the gRPC server. -type Config struct { - // Enable defines if the gRPC server should be enabled. - Enable bool `mapstructure:"enable" toml:"enable" comment:"Enable defines if the gRPC server should be enabled."` - - // Address defines the API server to listen on - Address string `mapstructure:"address" toml:"address" comment:"Address defines the gRPC server address to bind to."` - - // MaxRecvMsgSize defines the max message size in bytes the server can receive. - // The default value is 10MB. - MaxRecvMsgSize int `mapstructure:"max-recv-msg-size" toml:"max-recv-msg-size" comment:"MaxRecvMsgSize defines the max message size in bytes the server can receive.\nThe default value is 10MB."` - - // MaxSendMsgSize defines the max message size in bytes the server can send. - // The default value is math.MaxInt32. - MaxSendMsgSize int `mapstructure:"max-send-msg-size" toml:"max-send-msg-size" comment:"MaxSendMsgSize defines the max message size in bytes the server can send.\nThe default value is math.MaxInt32."` -} - -// CfgOption is a function that allows to overwrite the default server configuration. -type CfgOption func(*Config) - -// OverwriteDefaultConfig overwrites the default config with the new config. -func OverwriteDefaultConfig(newCfg *Config) CfgOption { - return func(cfg *Config) { - *cfg = *newCfg - } -} - -// Disable the grpc-gateway server by default (default enabled). -func Disable() CfgOption { - return func(cfg *Config) { - cfg.Enable = false - } -} diff --git a/server/v2/api/grpc/flags.go b/server/v2/api/grpc/flags.go deleted file mode 100644 index be2f49cccd..0000000000 --- a/server/v2/api/grpc/flags.go +++ /dev/null @@ -1,12 +0,0 @@ -package grpc - -import "fmt" - -// start flags are prefixed with the server name -// as the config in prefixed with the server name -// this allows viper to properly bind the flags -func prefix(f string) string { - return fmt.Sprintf("%s.%s", ServerName, f) -} - -var FlagAddress = prefix("address") diff --git a/server/v2/api/grpc/gogoreflection/doc.go b/server/v2/api/grpc/gogoreflection/doc.go deleted file mode 100644 index 691e632d0e..0000000000 --- a/server/v2/api/grpc/gogoreflection/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package gogoreflection implements gRPC reflection for gogoproto consumers -// the normal reflection library does not work as it points to a different -// singleton registry. The API and codebase is taken from the official gRPC -// reflection repository. 
-package gogoreflection diff --git a/server/v2/api/grpc/gogoreflection/fix_registration.go b/server/v2/api/grpc/gogoreflection/fix_registration.go deleted file mode 100644 index ab1a18f592..0000000000 --- a/server/v2/api/grpc/gogoreflection/fix_registration.go +++ /dev/null @@ -1,76 +0,0 @@ -package gogoreflection - -import ( - "reflect" - - _ "github.com/cosmos/gogoproto/gogoproto" // required so it does register the gogoproto file descriptor - gogoproto "github.com/cosmos/gogoproto/proto" - - _ "github.com/cosmos/cosmos-proto" // look above - "github.com/golang/protobuf/proto" //nolint:staticcheck // migrate in a future pr -) - -func getFileDescriptor(filePath string) []byte { - // Since we got well known descriptors which are not registered into gogoproto - // registry but are instead registered into the proto one, we need to check both. - fd := gogoproto.FileDescriptor(filePath) - if len(fd) != 0 { - return fd - } - - return proto.FileDescriptor(filePath) //nolint:staticcheck // keep for backward compatibility -} - -func getMessageType(name string) reflect.Type { - typ := gogoproto.MessageType(name) - if typ != nil { - return typ - } - - return proto.MessageType(name) //nolint:staticcheck // keep for backward compatibility -} - -func getExtension(extID int32, m proto.Message) *gogoproto.ExtensionDesc { - // check first in gogoproto registry - for id, desc := range gogoproto.RegisteredExtensions(m) { - if id == extID { - return desc - } - } - - // check into proto registry - for id, desc := range proto.RegisteredExtensions(m) { //nolint:staticcheck // keep for backward compatibility - if id == extID { - return &gogoproto.ExtensionDesc{ - ExtendedType: desc.ExtendedType, //nolint:staticcheck // keep for backward compatibility - ExtensionType: desc.ExtensionType, //nolint:staticcheck // keep for backward compatibility - Field: desc.Field, //nolint:staticcheck // keep for backward compatibility - Name: desc.Name, //nolint:staticcheck // keep for backward compatibility - Tag: desc.Tag, //nolint:staticcheck // keep for backward compatibility - Filename: desc.Filename, //nolint:staticcheck // keep for backward compatibility - } - } - } - - return nil -} - -func getExtensionsNumbers(m proto.Message) []int32 { - gogoProtoExts := gogoproto.RegisteredExtensions(m) - - out := make([]int32, 0, len(gogoProtoExts)) - for id := range gogoProtoExts { - out = append(out, id) - } - if len(out) != 0 { - return out - } - - protoExts := proto.RegisteredExtensions(m) //nolint:staticcheck // kept for backwards compatibility - out = make([]int32, 0, len(protoExts)) - for id := range protoExts { - out = append(out, id) - } - - return out -} diff --git a/server/v2/api/grpc/gogoreflection/fix_registration_test.go b/server/v2/api/grpc/gogoreflection/fix_registration_test.go deleted file mode 100644 index 0693556688..0000000000 --- a/server/v2/api/grpc/gogoreflection/fix_registration_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package gogoreflection - -import ( - "testing" - - "google.golang.org/protobuf/runtime/protoimpl" -) - -func TestRegistrationFix(t *testing.T) { - res := getFileDescriptor("gogoproto/gogo.proto") - rawDesc, err := decompress(res) - if err != nil { - t.Fatal(err) - } - fd := protoimpl.DescBuilder{ - RawDescriptor: rawDesc, - }.Build() - - if fd.File.Extensions().Len() == 0 { - t.Fatal("unexpected parsing") - } -} diff --git a/server/v2/api/grpc/gogoreflection/serverreflection.go b/server/v2/api/grpc/gogoreflection/serverreflection.go deleted file mode 100644 index 79f520545a..0000000000 --- 
a/server/v2/api/grpc/gogoreflection/serverreflection.go +++ /dev/null @@ -1,483 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -Package gogoreflection implements server reflection service. - -The service implemented is defined in: -https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. - -To register server reflection on a gRPC server: - - import "google.golang.org/grpc/reflection" - - s := grpc.NewServer() - pb.RegisterYourOwnServer(s, &server{}) - - // Register reflection service on gRPC server. - reflection.Register(s) - - s.Serve(lis) -*/ -package gogoreflection // import "google.golang.org/grpc/reflection" - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" - - gogoproto "github.com/cosmos/gogoproto/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - - "cosmossdk.io/core/log" -) - -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s *grpc.Server - - methods []string - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files - log log.Logger -} - -// Register registers the server reflection service on the given gRPC server. -func Register(s *grpc.Server, methods []string, logger log.Logger) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - methods: methods, - log: logger, - }) -} - -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). 
-type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - s.symbols = map[string]*dpb.FileDescriptorProto{} - services, fds := s.getServices(s.methods) - s.serviceNames = services - - processed := map[string]struct{}{} - for _, fd := range fds { - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := getFileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} - -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd -} - -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name -} - -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. -func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %w", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := gogoproto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %w", err) - } - return fd, nil -} - -// decompress does gzip decompression. 
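-// Registered file descriptors are stored gzip-compressed in wire format, so the
-// descriptors returned by getFileDescriptor are fed through this helper before
-// being unmarshaled.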
-func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %w", err) - } - out, err := io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %w", err) - } - return out, nil -} - -func typeForName(name string) (reflect.Type, error) { - pt := getMessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil -} - -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(gogoproto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - extDesc := getExtension(ext, m) - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(getFileDescriptor(extDesc.Filename)) -} - -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(gogoproto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - out := getExtensionsNumbers(m) - return out, nil -} - -// fileDescWithDependencies returns a slice of serialized fileDescriptors in -// wire format ([]byte). The fileDescriptors will include fd and all the -// transitive dependencies of fd with names not in sentFileDescriptors. -func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} - for len(queue) > 0 { - currentfd := queue[0] - queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := gogoproto.Marshal(currentfd) - if err != nil { - return nil, err - } - r = append(r, currentfdEncoded) - } - for _, dep := range currentfd.Dependency { - fdenc := getFileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) - } - } - return r, nil -} - -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshaling -// on them, and returns the marshaled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := getFileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// fileDescEncodingContainingSymbol finds the file descriptor containing the -// given symbol, finds all of its previously unsent transitive dependencies, -// does marshaling on them, and returns the marshaled result. The given symbol -// can be a type, a service or a method. -func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) - } - - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// fileDescEncodingContainingExtension finds the file descriptor containing -// given extension, finds all of its previously unsent transitive dependencies, -// does marshaling on them, and returns the marshaled result. -func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// allExtensionNumbersForTypeName returns all extension numbers for the given type. -func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err - } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err - } - return extNums, nil -} - -// ServerReflectionInfo is the reflection service handler. -func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { - sentFileDescriptors := make(map[string]bool) - for { - in, err := stream.Recv() - if errors.Is(err, io.EOF) { - return nil - } - if err != nil { - return err - } - - out := &rpb.ServerReflectionResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - ValidHost: in.Host, //nolint:staticcheck // SA1019: we want to keep using v1alpha - OriginalRequest: in, - } - switch req := in.MessageRequest.(type) { - case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) //nolint:staticcheck // SA1019: we want to keep using v1alpha - if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, //nolint:staticcheck // SA1019: we want to keep using v1alpha - } - } - case *rpb.ServerReflectionRequest_FileContainingSymbol: - b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) //nolint:staticcheck // SA1019: we want to keep using v1alpha - if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, //nolint:staticcheck // SA1019: we want to keep using v1alpha - } - } - case *rpb.ServerReflectionRequest_FileContainingExtension: - typeName := req.FileContainingExtension.ContainingType //nolint:staticcheck // SA1019: we want to keep using v1alpha - extNum := req.FileContainingExtension.ExtensionNumber //nolint:staticcheck // SA1019: we want to keep using 
v1alpha - b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) - if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, //nolint:staticcheck // SA1019: we want to keep using v1alpha - } - } - case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: - extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) //nolint:staticcheck // SA1019: we want to keep using v1alpha - if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - BaseTypeName: req.AllExtensionNumbersOfType, //nolint:staticcheck // SA1019: we want to keep using v1alpha - ExtensionNumber: extNums, - }, - } - } - case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) //nolint:staticcheck // SA1019: we want to keep using v1alpha - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - Name: n, - } - } - out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &rpb.ListServiceResponse{ //nolint:staticcheck // SA1019: we want to keep using v1alpha - Service: serviceResponses, - }, - } - default: - return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) - } - if err := stream.Send(out); err != nil { - return err - } - } -} - -// getServices gets the unique list of services given a list of methods. 
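-// For example (illustrative): a full method name such as
-// "/cosmos.bank.v1beta1.Query/Balance" is rewritten to
-// "cosmos.bank.v1beta1.Query.Balance" for the registry lookup, and its parent
-// service "cosmos.bank.v1beta1.Query" is recorded once.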
-func (s *serverReflectionServer) getServices(methods []string) (svcs []string, fds []*dpb.FileDescriptorProto) { - registry, err := gogoproto.MergedRegistry() - if err != nil { - s.log.Error("unable to load merged registry", "err", err) - return nil, nil - } - seenSvc := map[protoreflect.FullName]struct{}{} - for _, methodName := range methods { - methodName = strings.Join(strings.Split(methodName[1:], "/"), ".") - md, err := registry.FindDescriptorByName(protoreflect.FullName(methodName)) - if err != nil { - s.log.Error("unable to load method descriptor", "method", methodName, "err", err) - continue - } - svc := md.(protoreflect.MethodDescriptor).Parent() - if _, seen := seenSvc[svc.FullName()]; !seen { - svcs = append(svcs, string(svc.FullName())) - file := svc.ParentFile() - fds = append(fds, protodesc.ToFileDescriptorProto(file)) - } - } - return -} diff --git a/server/v2/api/grpc/server.go b/server/v2/api/grpc/server.go deleted file mode 100644 index 5bce26508b..0000000000 --- a/server/v2/api/grpc/server.go +++ /dev/null @@ -1,200 +0,0 @@ -package grpc - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "strconv" - - "github.com/cosmos/gogoproto/proto" - "github.com/spf13/pflag" - "github.com/spf13/viper" - "golang.org/x/exp/maps" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - - "cosmossdk.io/core/transaction" - "cosmossdk.io/log" - serverv2 "cosmossdk.io/server/v2" - "cosmossdk.io/server/v2/api/grpc/gogoreflection" -) - -const ( - ServerName = "grpc" - - BlockHeightHeader = "x-cosmos-block-height" -) - -type Server[T transaction.Tx] struct { - logger log.Logger - config *Config - cfgOptions []CfgOption - - grpcSrv *grpc.Server -} - -// New creates a new grpc server. -func New[T transaction.Tx](cfgOptions ...CfgOption) *Server[T] { - return &Server[T]{ - cfgOptions: cfgOptions, - } -} - -// Init returns a correctly configured and initialized gRPC server. -// Note, the caller is responsible for starting the server. -func (s *Server[T]) Init(appI serverv2.AppI[T], v *viper.Viper, logger log.Logger) error { - cfg := s.Config().(*Config) - if v != nil { - if err := serverv2.UnmarshalSubConfig(v, s.Name(), &cfg); err != nil { - return fmt.Errorf("failed to unmarshal config: %w", err) - } - } - methodsMap := appI.GetGPRCMethodsToMessageMap() - - grpcSrv := grpc.NewServer( - grpc.ForceServerCodec(newProtoCodec(appI.InterfaceRegistry()).GRPCCodec()), - grpc.MaxSendMsgSize(cfg.MaxSendMsgSize), - grpc.MaxRecvMsgSize(cfg.MaxRecvMsgSize), - grpc.UnknownServiceHandler( - makeUnknownServiceHandler(methodsMap, appI.GetAppManager()), - ), - ) - - // Reflection allows external clients to see what services and methods the gRPC server exposes. 
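-	// (e.g. generic tools such as grpcurl rely on this to list and call RPCs
-	// without having the .proto files on hand)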
-	gogoreflection.Register(grpcSrv, maps.Keys(methodsMap), logger.With("sub-module", "grpc-reflection"))
-
-	s.grpcSrv = grpcSrv
-	s.config = cfg
-	s.logger = logger.With(log.ModuleKey, s.Name())
-
-	return nil
-}
-
-func (s *Server[T]) StartCmdFlags() *pflag.FlagSet {
-	flags := pflag.NewFlagSet(s.Name(), pflag.ExitOnError)
-	flags.String(FlagAddress, "localhost:9090", "Listen address")
-	return flags
-}
-
-func makeUnknownServiceHandler(messageMap map[string]func() proto.Message, querier interface {
-	Query(ctx context.Context, version uint64, msg proto.Message) (proto.Message, error)
-},
-) grpc.StreamHandler {
-	return func(srv any, stream grpc.ServerStream) error {
-		method, ok := grpc.MethodFromServerStream(stream)
-		if !ok {
-			return status.Error(codes.InvalidArgument, "unable to get method")
-		}
-		makeMsg, exists := messageMap[method]
-		if !exists {
-			return status.Errorf(codes.Unimplemented, "gRPC method %s is not handled", method)
-		}
-		for {
-			req := makeMsg()
-			err := stream.RecvMsg(req)
-			if err != nil {
-				if errors.Is(err, io.EOF) {
-					return nil
-				}
-				return err
-			}
-
-			// extract height header
-			ctx := stream.Context()
-			height, err := getHeightFromCtx(ctx)
-			if err != nil {
-				return status.Errorf(codes.InvalidArgument, "failed to get height from context: %v", err)
-			}
-			resp, err := querier.Query(ctx, height, req)
-			if err != nil {
-				return err
-			}
-			err = stream.SendMsg(resp)
-			if err != nil {
-				return err
-			}
-		}
-	}
-}
-
-func getHeightFromCtx(ctx context.Context) (uint64, error) {
-	md, ok := metadata.FromIncomingContext(ctx)
-	if !ok {
-		return 0, nil
-	}
-	values := md.Get(BlockHeightHeader)
-	if len(values) == 0 {
-		return 0, nil
-	}
-	if len(values) != 1 {
-		return 0, fmt.Errorf("gRPC height metadata must be of length 1, got: %d", len(values))
-	}
-
-	heightStr := values[0]
-	height, err := strconv.ParseUint(heightStr, 10, 64)
-	if err != nil {
-		return 0, fmt.Errorf("unable to parse height string from gRPC metadata %s: %w", heightStr, err)
-	}
-
-	return height, nil
-}
-
-func (s *Server[T]) Name() string {
-	return ServerName
-}
-
-func (s *Server[T]) Config() any {
-	if s.config == nil || s.config == (&Config{}) {
-		cfg := DefaultConfig()
-		// overwrite the default config with the provided options
-		for _, opt := range s.cfgOptions {
-			opt(cfg)
-		}
-
-		return cfg
-	}
-
-	return s.config
-}
-
-func (s *Server[T]) Start(ctx context.Context) error {
-	if !s.config.Enable {
-		return nil
-	}
-
-	listener, err := net.Listen("tcp", s.config.Address)
-	if err != nil {
-		return fmt.Errorf("failed to listen on address %s: %w", s.config.Address, err)
-	}
-
-	errCh := make(chan error)
-
-	// Start the gRPC server in an external goroutine as Serve is blocking and will
-	// return an error upon failure, which we'll send on the error channel and
-	// consume below.
-	go func() {
-		s.logger.Info("starting gRPC server...", "address", s.config.Address)
-		errCh <- s.grpcSrv.Serve(listener)
-	}()
-
-	// Block on the error channel: it signals either an indication to stop the
-	// server or that the server failed to start properly.
-	err = <-errCh
-	s.logger.Error("failed to start gRPC server", "err", err)
-	return err
-}
-
-func (s *Server[T]) Stop(ctx context.Context) error {
-	if !s.config.Enable {
-		return nil
-	}
-
-	s.logger.Info("stopping gRPC server...", "address", s.config.Address)
-	s.grpcSrv.GracefulStop()
-
-	return nil
-}
diff --git a/server/v2/api/grpcgateway/config.go b/server/v2/api/grpcgateway/config.go
deleted file mode 100644
index c5ccb3bfe2..0000000000
--- a/server/v2/api/grpcgateway/config.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package grpcgateway
-
-func DefaultConfig() *Config {
-	return &Config{
-		Enable: true,
-	}
-}
-
-type Config struct {
-	// Enable defines if the gRPC-gateway should be enabled.
-	Enable bool `mapstructure:"enable" toml:"enable" comment:"Enable defines if the gRPC-gateway should be enabled."`
-}
-
-type CfgOption func(*Config)
-
-// OverwriteDefaultConfig overwrites the default config with the new config.
-func OverwriteDefaultConfig(newCfg *Config) CfgOption {
-	return func(cfg *Config) {
-		*cfg = *newCfg
-	}
-}
-
-// Disable the grpc-gateway server by default (default enabled).
-func Disable() CfgOption {
-	return func(cfg *Config) {
-		cfg.Enable = false
-	}
-}
diff --git a/server/v2/api/grpcgateway/server.go b/server/v2/api/grpcgateway/server.go
deleted file mode 100644
index 028027a83a..0000000000
--- a/server/v2/api/grpcgateway/server.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package grpcgateway
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"strings"
-
-	gateway "github.com/cosmos/gogogateway"
-	"github.com/cosmos/gogoproto/jsonpb"
-	"github.com/gorilla/mux"
-	"github.com/grpc-ecosystem/grpc-gateway/runtime"
-	"github.com/spf13/viper"
-	"google.golang.org/grpc"
-
-	"cosmossdk.io/core/transaction"
-	"cosmossdk.io/log"
-	serverv2 "cosmossdk.io/server/v2"
-)
-
-var _ serverv2.ServerComponent[transaction.Tx] = (*GRPCGatewayServer[transaction.Tx])(nil)
-
-const (
-	ServerName = "grpc-gateway"
-
-	// GRPCBlockHeightHeader is the gRPC header for block height.
-	GRPCBlockHeightHeader = "x-cosmos-block-height"
-)
-
-type GRPCGatewayServer[T transaction.Tx] struct {
-	logger     log.Logger
-	config     *Config
-	cfgOptions []CfgOption
-
-	GRPCSrv           *grpc.Server
-	GRPCGatewayRouter *runtime.ServeMux
-}
-
-// New creates a new gRPC-gateway server.
-func New[T transaction.Tx](grpcSrv *grpc.Server, ir jsonpb.AnyResolver, cfgOptions ...CfgOption) *GRPCGatewayServer[T] {
-	// The default JSON marshaller used by the gRPC-Gateway is unable to marshal non-nullable non-scalar fields.
-	// Using the gogo/gateway package with the gRPC-Gateway WithMarshaler option fixes the scalar field marshaling issue.
-	marshalerOption := &gateway.JSONPb{
-		EmitDefaults: true,
-		Indent:       "",
-		OrigName:     true,
-		AnyResolver:  ir,
-	}
-
-	return &GRPCGatewayServer[T]{
-		GRPCSrv: grpcSrv,
-		GRPCGatewayRouter: runtime.NewServeMux(
-			// Custom marshaler option is required for gogo proto
-			runtime.WithMarshalerOption(runtime.MIMEWildcard, marshalerOption),
-
-			// This is necessary to get error details properly
-			// marshaled in unary requests.
-			runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
-
-			// Custom header matcher for mapping request headers to
-			// GRPC metadata
-			runtime.WithIncomingHeaderMatcher(CustomGRPCHeaderMatcher),
-		),
-		cfgOptions: cfgOptions,
-	}
-}
-
-func (g *GRPCGatewayServer[T]) Name() string {
-	return ServerName
-}
-
-func (s *GRPCGatewayServer[T]) Config() any {
-	if s.config == nil || s.config == (&Config{}) {
-		cfg := DefaultConfig()
-		// overwrite the default config with the provided options
-		for _, opt := range s.cfgOptions {
-			opt(cfg)
-		}
-
-		return cfg
-	}
-
-	return s.config
-}
-
-func (s *GRPCGatewayServer[T]) Init(appI serverv2.AppI[transaction.Tx], v *viper.Viper, logger log.Logger) error {
-	cfg := s.Config().(*Config)
-	if v != nil {
-		if err := serverv2.UnmarshalSubConfig(v, s.Name(), &cfg); err != nil {
-			return fmt.Errorf("failed to unmarshal config: %w", err)
-		}
-	}
-
-	// Register the gRPC-Gateway server.
-	// appI.RegisterGRPCGatewayRoutes(s.GRPCGatewayRouter, s.GRPCSrv)
-
-	s.logger = logger
-	s.config = cfg
-
-	return nil
-}
-
-func (s *GRPCGatewayServer[T]) Start(ctx context.Context) error {
-	if !s.config.Enable {
-		return nil
-	}
-
-	// TODO start a normal Go http server (and do not leverage comet's like https://github.com/cosmos/cosmos-sdk/blob/9df6019de6ee7999fe9864bac836deb2f36dd44a/server/api/server.go#L98)
-
-	return nil
-}
-
-func (s *GRPCGatewayServer[T]) Stop(ctx context.Context) error {
-	if !s.config.Enable {
-		return nil
-	}
-
-	return nil
-}
-
-// Register registers the grpc-gateway server on the given router.
-func (s *GRPCGatewayServer[T]) Register(r mux.Router) error {
-	// configure the grpc-gateway server
-	r.PathPrefix("/").Handler(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		// Fall back to grpc gateway server.
-		s.GRPCGatewayRouter.ServeHTTP(w, req)
-	}))
-
-	return nil
-}
-
-// CustomGRPCHeaderMatcher for mapping request headers to
-// GRPC metadata.
-// HTTP headers that start with 'Grpc-Metadata-' are automatically mapped to
-// gRPC metadata after removing prefix 'Grpc-Metadata-'. We can use this
-// CustomGRPCHeaderMatcher if headers don't start with `Grpc-Metadata-`.
-func CustomGRPCHeaderMatcher(key string) (string, bool) {
-	switch strings.ToLower(key) {
-	case GRPCBlockHeightHeader:
-		return GRPCBlockHeightHeader, true
-
-	default:
-		return runtime.DefaultHeaderMatcher(key)
-	}
-}
diff --git a/server/v2/api/telemetry/config.go b/server/v2/api/telemetry/config.go
deleted file mode 100644
index 63a37ed1f3..0000000000
--- a/server/v2/api/telemetry/config.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package telemetry
-
-type Config struct {
-	// Prefixed with keys to separate services
-	ServiceName string `mapstructure:"service-name" toml:"service-name" comment:"Prefixed with keys to separate services."`
-
-	// Enabled enables the application telemetry functionality. When enabled,
-	// an in-memory sink is also enabled by default. Operators may also enable
-	// other sinks such as Prometheus.
-	Enabled bool `mapstructure:"enabled" toml:"enabled" comment:"Enabled enables the application telemetry functionality. When enabled, an in-memory sink is also enabled by default. Operators may also enable other sinks such as Prometheus."`
-
-	// Enable prefixing gauge values with hostname
-	EnableHostname bool `mapstructure:"enable-hostname" toml:"enable-hostname" comment:"Enable prefixing gauge values with hostname."`
-
-	// Enable adding hostname to labels
-	EnableHostnameLabel bool `mapstructure:"enable-hostname-label" toml:"enable-hostname-label" comment:"Enable adding hostname to labels."`
-
-	// Enable adding service to labels
-	EnableServiceLabel bool `mapstructure:"enable-service-label" toml:"enable-service-label" comment:"Enable adding service to labels."`
-
-	// PrometheusRetentionTime, when positive, enables a Prometheus metrics sink.
-	// It defines the retention duration in seconds.
-	PrometheusRetentionTime int64 `mapstructure:"prometheus-retention-time" toml:"prometheus-retention-time" comment:"PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. It defines the retention duration in seconds."`
-
-	// GlobalLabels defines a global set of name/value label tuples applied to all
-	// metrics emitted using the wrapper functions defined in telemetry package.
-	//
-	// Example:
-	// [["chain_id", "cosmoshub-1"]]
-	GlobalLabels [][]string `mapstructure:"global-labels" toml:"global-labels" comment:"GlobalLabels defines a global set of name/value label tuples applied to all metrics emitted using the wrapper functions defined in telemetry package.\n Example:\n [[\"chain_id\", \"cosmoshub-1\"]]"`
-
-	// MetricsSink defines the type of metrics backend to use.
-	MetricsSink string `mapstructure:"type" toml:"metrics-sink" comment:"MetricsSink defines the type of metrics backend to use. Default is in memory"`
-
-	// StatsdAddr defines the address of a statsd server to send metrics to.
-	// Only utilized if MetricsSink is set to "statsd" or "dogstatsd".
-	StatsdAddr string `mapstructure:"statsd-addr" toml:"stats-addr" comment:"StatsdAddr defines the address of a statsd server to send metrics to. Only utilized if MetricsSink is set to \"statsd\" or \"dogstatsd\"."`
-
-	// DatadogHostname defines the hostname to use when emitting metrics to
-	// Datadog. Only utilized if MetricsSink is set to "dogstatsd".
-	DatadogHostname string `mapstructure:"datadog-hostname" toml:"data-dog-hostname" comment:"DatadogHostname defines the hostname to use when emitting metrics to Datadog. Only utilized if MetricsSink is set to \"dogstatsd\"."`
-}
diff --git a/server/v2/api/telemetry/metrics.go b/server/v2/api/telemetry/metrics.go
deleted file mode 100644
index 78fe6388ca..0000000000
--- a/server/v2/api/telemetry/metrics.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package telemetry
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"time"
-
-	"github.com/hashicorp/go-metrics"
-	"github.com/hashicorp/go-metrics/datadog"
-	metricsprom "github.com/hashicorp/go-metrics/prometheus"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/expfmt"
-)
-
-// GlobalLabels defines the set of global labels that will be applied to all
-// metrics emitted using the telemetry package function wrappers.
-var GlobalLabels = []metrics.Label{} // nolint: ignore // false positive
-
-// NewLabel creates a new instance of Label with name and value
-func NewLabel(name, value string) metrics.Label {
-	return metrics.Label{Name: name, Value: value}
-}
-
-// Metrics supported format types.
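-// The format is chosen by the caller of Gather; when metrics are served over
-// HTTP (see RegisterMetrics in server.go), it comes from the "format" query
-// parameter, e.g. GET /metrics?format=prometheus, with the empty default
-// rendering the in-memory summary as JSON.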
-const ( - FormatDefault = "" - FormatPrometheus = "prometheus" - FormatText = "text" - ContentTypeText = `text/plain; version=` + expfmt.TextVersion + `; charset=utf-8` - - MetricSinkInMem = "mem" - MetricSinkStatsd = "statsd" - MetricSinkDogsStatsd = "dogstatsd" -) - -// DisplayableSink is an interface that defines a method for displaying metrics. -type DisplayableSink interface { - DisplayMetrics(resp http.ResponseWriter, req *http.Request) (any, error) -} - -// Metrics defines a wrapper around application telemetry functionality. It allows -// metrics to be gathered at any point in time. When creating a Metrics object, -// internally, a global metrics is registered with a set of sinks as configured -// by the operator. In addition to the sinks, when a process gets a SIGUSR1, a -// dump of formatted recent metrics will be sent to STDERR. -type Metrics struct { - sink metrics.MetricSink - prometheusEnabled bool -} - -// GatherResponse is the response type of registered metrics -type GatherResponse struct { - Metrics []byte - ContentType string -} - -// New creates a new instance of Metrics -func New(cfg Config) (_ *Metrics, rerr error) { - if !cfg.Enabled { - return nil, nil - } - - if numGlobalLabels := len(cfg.GlobalLabels); numGlobalLabels > 0 { - parsedGlobalLabels := make([]metrics.Label, numGlobalLabels) - for i, gl := range cfg.GlobalLabels { - parsedGlobalLabels[i] = NewLabel(gl[0], gl[1]) - } - GlobalLabels = parsedGlobalLabels - } - - metricsConf := metrics.DefaultConfig(cfg.ServiceName) - metricsConf.EnableHostname = cfg.EnableHostname - metricsConf.EnableHostnameLabel = cfg.EnableHostnameLabel - - var ( - sink metrics.MetricSink - err error - ) - switch cfg.MetricsSink { - case MetricSinkStatsd: - sink, err = metrics.NewStatsdSink(cfg.StatsdAddr) - case MetricSinkDogsStatsd: - sink, err = datadog.NewDogStatsdSink(cfg.StatsdAddr, cfg.DatadogHostname) - default: - memSink := metrics.NewInmemSink(10*time.Second, time.Minute) - sink = memSink - inMemSig := metrics.DefaultInmemSignal(memSink) - defer func() { - if rerr != nil { - inMemSig.Stop() - } - }() - } - - if err != nil { - return nil, err - } - - m := &Metrics{sink: sink} - fanout := metrics.FanoutSink{sink} - - if cfg.PrometheusRetentionTime > 0 { - m.prometheusEnabled = true - prometheusOpts := metricsprom.PrometheusOpts{ - Expiration: time.Duration(cfg.PrometheusRetentionTime) * time.Second, - } - - promSink, err := metricsprom.NewPrometheusSinkFrom(prometheusOpts) - if err != nil { - return nil, err - } - - fanout = append(fanout, promSink) - } - - if _, err := metrics.NewGlobal(metricsConf, fanout); err != nil { - return nil, err - } - - return m, nil -} - -// Gather collects all registered metrics and returns a GatherResponse where the -// metrics are encoded depending on the type. Metrics are either encoded via -// Prometheus or JSON if in-memory. -func (m *Metrics) Gather(format string) (GatherResponse, error) { - switch format { - case FormatPrometheus: - return m.gatherPrometheus() - - case FormatText: - return m.gatherGeneric() - - case FormatDefault: - return m.gatherGeneric() - - default: - return GatherResponse{}, fmt.Errorf("unsupported metrics format: %s", format) - } -} - -// gatherPrometheus collects Prometheus metrics and returns a GatherResponse. -// If Prometheus metrics are not enabled, it returns an error. 
-func (m *Metrics) gatherPrometheus() (GatherResponse, error) { - if !m.prometheusEnabled { - return GatherResponse{}, fmt.Errorf("prometheus metrics are not enabled") - } - - metricsFamilies, err := prometheus.DefaultGatherer.Gather() - if err != nil { - return GatherResponse{}, fmt.Errorf("failed to gather prometheus metrics: %w", err) - } - - buf := &bytes.Buffer{} - defer buf.Reset() - - e := expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeTextPlain)) - - for _, mf := range metricsFamilies { - if err := e.Encode(mf); err != nil { - return GatherResponse{}, fmt.Errorf("failed to encode prometheus metrics: %w", err) - } - } - - return GatherResponse{ContentType: ContentTypeText, Metrics: buf.Bytes()}, nil -} - -// gatherGeneric collects generic metrics and returns a GatherResponse. -func (m *Metrics) gatherGeneric() (GatherResponse, error) { - gm, ok := m.sink.(DisplayableSink) - if !ok { - return GatherResponse{}, fmt.Errorf("non in-memory metrics sink does not support generic format") - } - - summary, err := gm.DisplayMetrics(nil, nil) - if err != nil { - return GatherResponse{}, fmt.Errorf("failed to gather in-memory metrics: %w", err) - } - - content, err := json.Marshal(summary) - if err != nil { - return GatherResponse{}, fmt.Errorf("failed to encode in-memory metrics: %w", err) - } - - return GatherResponse{ContentType: "application/json", Metrics: content}, nil -} diff --git a/server/v2/api/telemetry/server.go b/server/v2/api/telemetry/server.go deleted file mode 100644 index a944fc7f4f..0000000000 --- a/server/v2/api/telemetry/server.go +++ /dev/null @@ -1,47 +0,0 @@ -package telemetry - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func RegisterMetrics(r mux.Router, cfg Config) (*Metrics, error) { - m, err := New(cfg) - if err != nil { - return nil, err - } - - metricsHandler := func(w http.ResponseWriter, r *http.Request) { - format := strings.TrimSpace(r.FormValue("format")) - - gr, err := m.Gather(format) - if err != nil { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusBadRequest) - bz, err := json.Marshal(errorResponse{Code: 400, Error: fmt.Sprintf("failed to gather metrics: %s", err)}) - if err != nil { - return - } - _, _ = w.Write(bz) - - return - } - - w.Header().Set("Content-Type", gr.ContentType) - _, _ = w.Write(gr.Metrics) - } - - r.HandleFunc("/metrics", metricsHandler).Methods("GET") - - return m, nil -} - -// errorResponse defines the attributes of a JSON error response. -type errorResponse struct { - Code int `json:"code,omitempty"` - Error string `json:"error"` -} diff --git a/server/v2/appmanager/appmanager.go b/server/v2/appmanager/appmanager.go deleted file mode 100644 index d9c84c5035..0000000000 --- a/server/v2/appmanager/appmanager.go +++ /dev/null @@ -1,172 +0,0 @@ -package appmanager - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - appmanager "cosmossdk.io/core/app" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/core/transaction" -) - -// Store defines the underlying storage behavior needed by AppManager. -type Store interface { - // StateLatest returns a readonly view over the latest - // committed state of the store. Alongside the version - // associated with it. - StateLatest() (uint64, corestore.ReaderMap, error) - - // StateAt returns a readonly view over the provided - // state. Must error when the version does not exist. 
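-	// (AppManager.Query below uses StateAt for height-gated queries and falls
-	// back to StateLatest when the requested version is 0.)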
- StateAt(version uint64) (corestore.ReaderMap, error) -} - -// AppManager is a coordinator for all things related to an application -type AppManager[T transaction.Tx] struct { - config Config - - db Store - - initGenesis InitGenesis - exportGenesis ExportGenesis - - stf StateTransitionFunction[T] -} - -func (a AppManager[T]) InitGenesis( - ctx context.Context, - blockRequest *appmanager.BlockRequest[T], - initGenesisJSON []byte, - txDecoder transaction.Codec[T], -) (*appmanager.BlockResponse, corestore.WriterMap, error) { - v, zeroState, err := a.db.StateLatest() - if err != nil { - return nil, nil, fmt.Errorf("unable to get latest state: %w", err) - } - if v != 0 { // TODO: genesis state may be > 0, we need to set version on store - return nil, nil, fmt.Errorf("cannot init genesis on non-zero state") - } - - var genTxs []T - genesisState, err := a.stf.RunWithCtx(ctx, zeroState, func(ctx context.Context) error { - return a.initGenesis(ctx, bytes.NewBuffer(initGenesisJSON), func(jsonTx json.RawMessage) error { - genTx, err := txDecoder.DecodeJSON(jsonTx) - if err != nil { - return fmt.Errorf("failed to decode genesis transaction: %w", err) - } - genTxs = append(genTxs, genTx) - return nil - }) - }) - if err != nil { - return nil, nil, fmt.Errorf("failed to import genesis state: %w", err) - } - // run block - // TODO: in an ideal world, genesis state is simply an initial state being applied - // unaware of what that state means in relation to every other - blockRequest.Txs = genTxs - - blockResponse, blockZeroState, err := a.stf.DeliverBlock(ctx, blockRequest, genesisState) - if err != nil { - return blockResponse, nil, fmt.Errorf("failed to deliver block %d: %w", blockRequest.Height, err) - } - - // after executing block 0, we extract the changes and apply them to the genesis state. - blockZeroStateChanges, err := blockZeroState.GetStateChanges() - if err != nil { - return nil, nil, fmt.Errorf("failed to get block zero state changes: %w", err) - } - - err = genesisState.ApplyStateChanges(blockZeroStateChanges) - if err != nil { - return nil, nil, fmt.Errorf("failed to apply block zero state changes to genesis state: %w", err) - } - - return blockResponse, genesisState, err - // consensus server will need to set the version of the store -} - -// ExportGenesis exports the genesis state of the application. -func (a AppManager[T]) ExportGenesis(ctx context.Context, version uint64) ([]byte, error) { - bz, err := a.exportGenesis(ctx, version) - if err != nil { - return nil, fmt.Errorf("failed to export genesis state: %w", err) - } - - return bz, nil -} - -func (a AppManager[T]) DeliverBlock( - ctx context.Context, - block *appmanager.BlockRequest[T], -) (*appmanager.BlockResponse, corestore.WriterMap, error) { - latestVersion, currentState, err := a.db.StateLatest() - if err != nil { - return nil, nil, fmt.Errorf("unable to create new state for height %d: %w", block.Height, err) - } - - if latestVersion+1 != block.Height { - return nil, nil, fmt.Errorf("invalid DeliverBlock height wanted %d, got %d", latestVersion+1, block.Height) - } - - blockResponse, newState, err := a.stf.DeliverBlock(ctx, block, currentState) - if err != nil { - return nil, nil, fmt.Errorf("block delivery failed: %w", err) - } - - return blockResponse, newState, nil -} - -// ValidateTx will validate the tx against the latest storage state. This means that -// only the stateful validation will be run, not the execution portion of the tx. -// If full execution is needed, Simulate must be used. 
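
Note: DeliverBlock above only accepts the next height; it loads the latest committed version and rejects anything other than latestVersion+1. A self-contained sketch of that guard (store and heights are simplified stand-ins, not the SDK types):

package main

import "fmt"

// store stands in for the Store interface above; only the committed
// version matters for the invariant.
type store struct{ latest uint64 }

// deliver enforces the same height check as AppManager.DeliverBlock and,
// on success, plays the role of the consensus server committing the
// new version.
func deliver(s *store, height uint64) error {
	if s.latest+1 != height {
		return fmt.Errorf("invalid DeliverBlock height wanted %d, got %d", s.latest+1, height)
	}
	s.latest = height
	return nil
}

func main() {
	s := &store{latest: 41}
	fmt.Println(deliver(s, 43)) // invalid DeliverBlock height wanted 42, got 43
	fmt.Println(deliver(s, 42)) // <nil>
}
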
-func (a AppManager[T]) ValidateTx(ctx context.Context, tx T) (appmanager.TxResult, error) { - _, latestState, err := a.db.StateLatest() - if err != nil { - return appmanager.TxResult{}, err - } - return a.stf.ValidateTx(ctx, latestState, a.config.ValidateTxGasLimit, tx), nil -} - -// Simulate runs validation and execution flow of a Tx. -func (a AppManager[T]) Simulate(ctx context.Context, tx T) (appmanager.TxResult, corestore.WriterMap, error) { - _, state, err := a.db.StateLatest() - if err != nil { - return appmanager.TxResult{}, nil, err - } - result, cs := a.stf.Simulate(ctx, state, a.config.SimulationGasLimit, tx) // TODO: check if this is done in the antehandler - return result, cs, nil -} - -// Query queries the application at the provided version. -// CONTRACT: Version must always be provided, if 0, get latest -func (a AppManager[T]) Query(ctx context.Context, version uint64, request transaction.Msg) (transaction.Msg, error) { - // if version is provided attempt to do a height query. - if version != 0 { - queryState, err := a.db.StateAt(version) - if err != nil { - return nil, err - } - return a.stf.Query(ctx, queryState, a.config.QueryGasLimit, request) - } - - // otherwise rely on latest available state. - _, queryState, err := a.db.StateLatest() - if err != nil { - return nil, err - } - return a.stf.Query(ctx, queryState, a.config.QueryGasLimit, request) -} - -// QueryWithState executes a query with the provided state. This allows to process a query -// independently of the db state. For example, it can be used to process a query with temporary -// and uncommitted state -func (a AppManager[T]) QueryWithState( - ctx context.Context, - state corestore.ReaderMap, - request transaction.Msg, -) (transaction.Msg, error) { - return a.stf.Query(ctx, state, a.config.QueryGasLimit, request) -} diff --git a/server/v2/appmanager/appmanager_builder.go b/server/v2/appmanager/appmanager_builder.go deleted file mode 100644 index b3671706c7..0000000000 --- a/server/v2/appmanager/appmanager_builder.go +++ /dev/null @@ -1,40 +0,0 @@ -package appmanager - -import ( - "cosmossdk.io/core/transaction" -) - -// Builder is a struct that represents the application builder for managing transactions. -// It contains various fields and methods for initializing the application and handling transactions. -type Builder[T transaction.Tx] struct { - STF StateTransitionFunction[T] // The state transition function for processing transactions. - DB Store // The database for storing application data. - - // Gas limits for validating, querying, and simulating transactions. - ValidateTxGasLimit uint64 - QueryGasLimit uint64 - SimulationGasLimit uint64 - - // InitGenesis is a function that initializes the application state from a genesis file. - // It takes a context, a source reader for the genesis file, and a transaction handler function. - InitGenesis InitGenesis - // ExportGenesis is a function that exports the application state to a genesis file. - // It takes a context and a version number for the genesis file. - ExportGenesis ExportGenesis -} - -// Build creates a new instance of AppManager with the provided configuration and returns it. -// It initializes the AppManager with the given database, export state, import state, initGenesis function, and state transition function. 
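
Note: wiring these pieces together happens through the Builder above. A sketch of a consumer building an AppManager and issuing a latest-height query; the stf, db and genesis functions are assumed to come from the caller (e.g. the server/v2 STF and a store/v2 root store), and version 0 means "latest" per the Query contract above.

package appwiring

import (
	"context"

	"cosmossdk.io/core/transaction"
	appmanager "cosmossdk.io/server/v2/appmanager"
)

func buildAndQuery[T transaction.Tx](
	ctx context.Context,
	stf appmanager.StateTransitionFunction[T],
	db appmanager.Store,
	initGenesis appmanager.InitGenesis,
	exportGenesis appmanager.ExportGenesis,
	req transaction.Msg,
) (transaction.Msg, error) {
	am, err := appmanager.Builder[T]{
		STF:                stf,
		DB:                 db,
		ValidateTxGasLimit: 100_000, // hypothetical limits
		QueryGasLimit:      100_000,
		SimulationGasLimit: 100_000,
		InitGenesis:        initGenesis,
		ExportGenesis:      exportGenesis,
	}.Build()
	if err != nil {
		return nil, err
	}

	// 0 falls through to StateLatest; a non-zero version queries StateAt.
	return am.Query(ctx, 0, req)
}
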
-func (b Builder[T]) Build() (*AppManager[T], error) { - return &AppManager[T]{ - config: Config{ - ValidateTxGasLimit: b.ValidateTxGasLimit, - QueryGasLimit: b.QueryGasLimit, - SimulationGasLimit: b.SimulationGasLimit, - }, - db: b.DB, - initGenesis: b.InitGenesis, - exportGenesis: b.ExportGenesis, - stf: b.STF, - }, nil -} diff --git a/server/v2/appmanager/config.go b/server/v2/appmanager/config.go deleted file mode 100644 index ae52849bf2..0000000000 --- a/server/v2/appmanager/config.go +++ /dev/null @@ -1,9 +0,0 @@ -package appmanager - -// Config represents the configuration options for the app manager. -// TODO: implement comments for toml -type Config struct { - ValidateTxGasLimit uint64 `mapstructure:"validate-tx-gas-limit"` // TODO: check how this works on app mempool - QueryGasLimit uint64 `mapstructure:"query-gas-limit"` - SimulationGasLimit uint64 `mapstructure:"simulation-gas-limit"` -} diff --git a/server/v2/appmanager/genesis.go b/server/v2/appmanager/genesis.go deleted file mode 100644 index 8acad003b6..0000000000 --- a/server/v2/appmanager/genesis.go +++ /dev/null @@ -1,14 +0,0 @@ -package appmanager - -import ( - "context" - "encoding/json" - "io" -) - -type ( - // ExportGenesis is a function type that represents the export of the genesis state. - ExportGenesis func(ctx context.Context, version uint64) ([]byte, error) - // InitGenesis is a function type that represents the initialization of the genesis state. - InitGenesis func(ctx context.Context, src io.Reader, txHandler func(json.RawMessage) error) error -) diff --git a/server/v2/appmanager/go.mod b/server/v2/appmanager/go.mod deleted file mode 100644 index 266c4eb8c4..0000000000 --- a/server/v2/appmanager/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module cosmossdk.io/server/v2/appmanager - -go 1.21 - -replace cosmossdk.io/core => ../../../core - -require cosmossdk.io/core v0.12.0 - -require ( - github.com/cosmos/gogoproto v1.5.0 // indirect - github.com/google/go-cmp v0.6.0 // indirect - golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect - google.golang.org/protobuf v1.34.2 // indirect -) diff --git a/server/v2/appmanager/go.sum b/server/v2/appmanager/go.sum deleted file mode 100644 index f9ca7916a7..0000000000 --- a/server/v2/appmanager/go.sum +++ /dev/null @@ -1,10 +0,0 @@ -github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= -github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw= -golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= diff --git a/server/v2/appmanager/types.go b/server/v2/appmanager/types.go deleted file mode 100644 index 760637dbcd..0000000000 --- a/server/v2/appmanager/types.go +++ /dev/null @@ -1,51 +0,0 @@ -package appmanager - -import ( - "context" - - appmanager "cosmossdk.io/core/app" - "cosmossdk.io/core/store" - "cosmossdk.io/core/transaction" -) - -// 
StateTransitionFunction is an interface for processing transactions and blocks. -type StateTransitionFunction[T transaction.Tx] interface { - // DeliverBlock executes a block of transactions. - DeliverBlock( - ctx context.Context, - block *appmanager.BlockRequest[T], - state store.ReaderMap, - ) (blockResult *appmanager.BlockResponse, newState store.WriterMap, err error) - - // ValidateTx validates a transaction. - ValidateTx( - ctx context.Context, - state store.ReaderMap, - gasLimit uint64, - tx T, - ) appmanager.TxResult - - // Simulate executes a transaction in simulation mode. - Simulate( - ctx context.Context, - state store.ReaderMap, - gasLimit uint64, - tx T, - ) (appmanager.TxResult, store.WriterMap) - - // Query executes a query on the application. - Query( - ctx context.Context, - state store.ReaderMap, - gasLimit uint64, - req transaction.Msg, - ) (transaction.Msg, error) - - // RunWithCtx executes the provided closure within a context. - // TODO: remove - RunWithCtx( - ctx context.Context, - state store.ReaderMap, - closure func(ctx context.Context) error, - ) (store.WriterMap, error) -} diff --git a/server/v2/cometbft/abci.go b/server/v2/cometbft/abci.go index cf6a0f5b60..45b1d962f8 100644 --- a/server/v2/cometbft/abci.go +++ b/server/v2/cometbft/abci.go @@ -526,7 +526,7 @@ func (c *Consensus[T]) VerifyVoteExtension( } if c.verifyVoteExt == nil { - return nil, fmt.Errorf("vote extensions are enabled but no verify function was set") + return nil, errors.New("vote extensions are enabled but no verify function was set") } _, latestStore, err := c.store.StateLatest() @@ -562,7 +562,7 @@ func (c *Consensus[T]) ExtendVote(ctx context.Context, req *abciproto.ExtendVote } if c.verifyVoteExt == nil { - return nil, fmt.Errorf("vote extensions are enabled but no verify function was set") + return nil, errors.New("vote extensions are enabled but no verify function was set") } _, latestStore, err := c.store.StateLatest() diff --git a/server/v2/cometbft/commands.go b/server/v2/cometbft/commands.go index 16c1e30905..26d1305895 100644 --- a/server/v2/cometbft/commands.go +++ b/server/v2/cometbft/commands.go @@ -2,6 +2,7 @@ package cometbft import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -253,7 +254,7 @@ $ %s query block --%s=%s switch typ { case TypeHeight: if args[0] == "" { - return fmt.Errorf("argument should be a block height") + return errors.New("argument should be a block height") } // optional height @@ -284,7 +285,7 @@ $ %s query block --%s=%s case TypeHash: if args[0] == "" { - return fmt.Errorf("argument should be a tx hash") + return errors.New("argument should be a tx hash") } // If hash is given, then query the tx by hash. 
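
Note: the StateTransitionFunction above is the contract the (now external) appmanager module consumes. A no-op sketch of an implementation, usable as a starting point for a test double; the imports match the deleted file, and the nil WriterMap returns are placeholders since constructing one depends on the concrete store.

package stfstub

import (
	"context"

	appmanager "cosmossdk.io/core/app"
	"cosmossdk.io/core/store"
	"cosmossdk.io/core/transaction"
)

type NoopSTF[T transaction.Tx] struct{}

// DeliverBlock accepts any block and reports an empty result.
func (NoopSTF[T]) DeliverBlock(ctx context.Context, block *appmanager.BlockRequest[T], state store.ReaderMap) (*appmanager.BlockResponse, store.WriterMap, error) {
	return &appmanager.BlockResponse{}, nil, nil
}

// ValidateTx treats every transaction as valid.
func (NoopSTF[T]) ValidateTx(ctx context.Context, state store.ReaderMap, gasLimit uint64, tx T) appmanager.TxResult {
	return appmanager.TxResult{}
}

// Simulate succeeds without touching state.
func (NoopSTF[T]) Simulate(ctx context.Context, state store.ReaderMap, gasLimit uint64, tx T) (appmanager.TxResult, store.WriterMap) {
	return appmanager.TxResult{}, nil
}

// Query echoes the request back.
func (NoopSTF[T]) Query(ctx context.Context, state store.ReaderMap, gasLimit uint64, req transaction.Msg) (transaction.Msg, error) {
	return req, nil
}

// RunWithCtx runs the closure against the unmodified context.
func (NoopSTF[T]) RunWithCtx(ctx context.Context, state store.ReaderMap, closure func(ctx context.Context) error) (store.WriterMap, error) {
	return nil, closure(ctx)
}
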
diff --git a/server/v2/cometbft/go.mod b/server/v2/cometbft/go.mod index 6165bca6db..076ab6a5cc 100644 --- a/server/v2/cometbft/go.mod +++ b/server/v2/cometbft/go.mod @@ -6,10 +6,6 @@ replace ( cosmossdk.io/api => ../../../api cosmossdk.io/core => ../../../core cosmossdk.io/core/testing => ../../../core/testing - cosmossdk.io/server/v2 => ../ - cosmossdk.io/server/v2/appmanager => ../appmanager - cosmossdk.io/store => ../../../store - cosmossdk.io/store/v2 => ../../../store/v2 cosmossdk.io/x/accounts => ../../../x/accounts cosmossdk.io/x/auth => ../../../x/auth cosmossdk.io/x/bank => ../../../x/bank @@ -25,14 +21,14 @@ require ( cosmossdk.io/core v0.12.1-0.20231114100755-569e3ff6a0d7 cosmossdk.io/errors v1.0.1 cosmossdk.io/log v1.3.1 - cosmossdk.io/server/v2 v2.0.0-00010101000000-000000000000 - cosmossdk.io/server/v2/appmanager v0.0.0-00010101000000-000000000000 - cosmossdk.io/store/v2 v2.0.0-00010101000000-000000000000 + cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6 // main + cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 // main + cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 // main cosmossdk.io/x/consensus v0.0.0-00010101000000-000000000000 github.com/cometbft/cometbft v1.0.0-rc1 github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.53.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -49,6 +45,7 @@ require ( buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.34.2-20240130113600-88ef6483f90f.2 // indirect cosmossdk.io/collections v0.4.0 // indirect cosmossdk.io/depinject v1.0.0 // indirect + cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 // indirect cosmossdk.io/math v1.3.0 // indirect cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // indirect cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 // indirect diff --git a/server/v2/cometbft/go.sum b/server/v2/cometbft/go.sum index 954a349c21..fca072ee44 100644 --- a/server/v2/cometbft/go.sum +++ b/server/v2/cometbft/go.sum @@ -10,12 +10,22 @@ cosmossdk.io/depinject v1.0.0 h1:dQaTu6+O6askNXO06+jyeUAnF2/ssKwrrszP9t5q050= cosmossdk.io/depinject v1.0.0/go.mod h1:zxK/h3HgHoA/eJVtiSsoaRaRA2D5U4cJ5thIG4ssbB8= cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 h1:IQNdY2kB+k+1OM2DvqFG1+UgeU1JzZrWtwuWzI3ZfwA= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5/go.mod h1:0CuYKkFHxc1vw2JC+t21THBCALJVROrWVR/3PQ1urpc= cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6 h1:r2BXi/s99Mq1ShLmP4QTlcUbMvVPKTMQztSbevu6Xeo= +cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6/go.mod h1:alRmtz2gedZe+goFHbNjkBPNTkShFW6HEeXiyT7hdHM= +cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 h1:vrHmVjfEjEwQh90dim272gYq7OFILg4Yrv3XzreMpe4= 
+cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6/go.mod h1:Xm5IOSjw45Sew7fiVckaTCIU5oQPs20V+54NOqR3H4o= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc h1:R9O9d75e0qZYUsVV0zzi+D7cNLnX2JrUOQNoIPaF0Bg= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc/go.mod h1:amTTatOUV3u1PsKmNb87z6/galCxrRbz9kRdJkL0DyU= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 h1:/ffIfMKzoCVUI38t5Vq3BNW9U8exRMxK5QgS/ujn0lA= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6/go.mod h1:aG3brMLcldPsdhfkdCaisGDIe+tXTNWdUDt5JYsRDl8= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= diff --git a/server/v2/cometbft/handlers/defaults.go b/server/v2/cometbft/handlers/defaults.go index f7e32f64fa..c16064974b 100644 --- a/server/v2/cometbft/handlers/defaults.go +++ b/server/v2/cometbft/handlers/defaults.go @@ -140,7 +140,7 @@ func (h *DefaultProposalHandler[T]) ProcessHandler() ProcessHandler[T] { if maxBlockGas > 0 { gaslimit, err := tx.GetGasLimit() if err != nil { - return fmt.Errorf("failed to get gas limit") + return errors.New("failed to get gas limit") } totalTxGas += gaslimit if totalTxGas > maxBlockGas { diff --git a/server/v2/cometbft/server.go b/server/v2/cometbft/server.go index 08f8e51807..c85a241566 100644 --- a/server/v2/cometbft/server.go +++ b/server/v2/cometbft/server.go @@ -74,6 +74,7 @@ func (s *CometBFTServer[T]) Init(appI serverv2.AppI[T], v *viper.Viper, logger l } s.logger = logger.With(log.ModuleKey, s.Name()) + store := appI.GetStore().(types.Store) consensus := NewConsensus( s.logger, appI.Name(), @@ -82,7 +83,7 @@ func (s *CometBFTServer[T]) Init(appI serverv2.AppI[T], v *viper.Viper, logger l s.serverOptions.Mempool, indexEvents, appI.GetGPRCMethodsToMessageMap(), - appI.GetStore().(types.Store), + store, s.config, s.initTxCodec, ) @@ -93,9 +94,8 @@ func (s *CometBFTServer[T]) Init(appI serverv2.AppI[T], v *viper.Viper, logger l consensus.addrPeerFilter = s.serverOptions.AddrPeerFilter consensus.idPeerFilter = s.serverOptions.IdPeerFilter - // TODO: set these; what is the appropriate presence of the Store interface here? 
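
Note: the hunk below swaps the unset snapshotter placeholders for direct type assertions on the store, which panic if the concrete store lacks the capability. A comma-ok sketch that fails with an error instead; storeAPI is a simplified stand-in for the cometbft types.Store, and the snapshots import path is assumed.

package cometbftsketch

import (
	"fmt"

	"cosmossdk.io/store/v2/snapshots" // assumed location of the snapshotter interfaces
)

// storeAPI captures only the two accessors used for snapshotting.
type storeAPI interface {
	GetStateStorage() any
	GetStateCommitment() any
}

// snapshotters asserts the optional capabilities explicitly so a
// misconfigured store surfaces as an error rather than a panic.
func snapshotters(store storeAPI) (snapshots.StorageSnapshotter, snapshots.CommitSnapshotter, error) {
	ss, ok := store.GetStateStorage().(snapshots.StorageSnapshotter)
	if !ok {
		return nil, nil, fmt.Errorf("state storage %T does not support snapshots", store.GetStateStorage())
	}
	sc, ok := store.GetStateCommitment().(snapshots.CommitSnapshotter)
	if !ok {
		return nil, nil, fmt.Errorf("state commitment %T does not support snapshots", store.GetStateCommitment())
	}
	return ss, sc, nil
}
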
- var ss snapshots.StorageSnapshotter - var sc snapshots.CommitSnapshotter + ss := store.GetStateStorage().(snapshots.StorageSnapshotter) + sc := store.GetStateCommitment().(snapshots.CommitSnapshotter) snapshotStore, err := GetSnapshotStore(s.config.ConfigTomlConfig.RootDir) if err != nil { diff --git a/server/v2/cometbft/utils.go b/server/v2/cometbft/utils.go index 05299352b6..e97f09716d 100644 --- a/server/v2/cometbft/utils.go +++ b/server/v2/cometbft/utils.go @@ -2,6 +2,7 @@ package cometbft import ( "context" + "errors" "fmt" "math" "strings" @@ -296,7 +297,7 @@ func (c *Consensus[T]) GetConsensusParams(ctx context.Context) (*cmtproto.Consen } if r, ok := res.(*consensus.QueryParamsResponse); !ok { - return nil, fmt.Errorf("failed to query consensus params") + return nil, errors.New("failed to query consensus params") } else { // convert our params to cometbft params return r.Params, nil diff --git a/server/v2/commands.go b/server/v2/commands.go deleted file mode 100644 index a0b2b05ae9..0000000000 --- a/server/v2/commands.go +++ /dev/null @@ -1,212 +0,0 @@ -package serverv2 - -import ( - "context" - "errors" - "os" - "os/signal" - "path/filepath" - "strings" - "syscall" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - "cosmossdk.io/core/transaction" - "cosmossdk.io/log" -) - -// Execute executes the root command of an application. -// It handles adding core CLI flags, specifically the logging flags. -func Execute(rootCmd *cobra.Command, envPrefix, defaultHome string) error { - rootCmd.PersistentFlags().String(FlagLogLevel, "info", "The logging level (trace|debug|info|warn|error|fatal|panic|disabled or '*:,:')") - rootCmd.PersistentFlags().String(FlagLogFormat, "plain", "The logging format (json|plain)") - rootCmd.PersistentFlags().Bool(FlagLogNoColor, false, "Disable colored logs") - rootCmd.PersistentFlags().StringP(FlagHome, "", defaultHome, "directory for config and data") - - // update the global viper with the root command's configuration - viper.SetEnvPrefix(envPrefix) - viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) - viper.AutomaticEnv() - - return rootCmd.Execute() -} - -// AddCommands add the server commands to the root command -// It configure the config handling and the logger handling -func AddCommands[T transaction.Tx]( - rootCmd *cobra.Command, - newApp AppCreator[T], - logger log.Logger, - components ...ServerComponent[T], -) error { - if len(components) == 0 { - return errors.New("no components provided") - } - - server := NewServer(logger, components...) - originalPersistentPreRunE := rootCmd.PersistentPreRunE - rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { - // set the default command outputs - cmd.SetOut(cmd.OutOrStdout()) - cmd.SetErr(cmd.ErrOrStderr()) - - if err := configHandle(server, cmd); err != nil { - return err - } - - // call the original PersistentPreRun(E) if it exists - if rootCmd.PersistentPreRun != nil { - rootCmd.PersistentPreRun(cmd, args) - return nil - } - - return originalPersistentPreRunE(cmd, args) - } - - cmds := server.CLICommands() - startCmd := createStartCommand(server, newApp) - startCmd.SetContext(rootCmd.Context()) - cmds.Commands = append(cmds.Commands, startCmd) - rootCmd.AddCommand(cmds.Commands...) - - if len(cmds.Queries) > 0 { - if queryCmd := findSubCommand(rootCmd, "query"); queryCmd != nil { - queryCmd.AddCommand(cmds.Queries...) 
- } else { - queryCmd := topLevelCmd(rootCmd.Context(), "query", "Querying subcommands") - queryCmd.Aliases = []string{"q"} - queryCmd.AddCommand(cmds.Queries...) - rootCmd.AddCommand(queryCmd) - } - } - - if len(cmds.Txs) > 0 { - if txCmd := findSubCommand(rootCmd, "tx"); txCmd != nil { - txCmd.AddCommand(cmds.Txs...) - } else { - txCmd := topLevelCmd(rootCmd.Context(), "tx", "Transactions subcommands") - txCmd.AddCommand(cmds.Txs...) - rootCmd.AddCommand(txCmd) - } - } - - return nil -} - -// createStartCommand creates the start command for the application. -func createStartCommand[T transaction.Tx]( - server *Server[T], - newApp AppCreator[T], -) *cobra.Command { - flags := server.StartFlags() - - cmd := &cobra.Command{ - Use: "start", - Short: "Run the application", - RunE: func(cmd *cobra.Command, args []string) error { - v := GetViperFromCmd(cmd) - l := GetLoggerFromCmd(cmd) - if err := v.BindPFlags(cmd.Flags()); err != nil { - return err - } - - if err := server.Init(newApp(l, v), v, l); err != nil { - return err - } - - ctx, cancelFn := context.WithCancel(cmd.Context()) - go func() { - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - sig := <-sigCh - cancelFn() - cmd.Printf("caught %s signal\n", sig.String()) - - if err := server.Stop(ctx); err != nil { - cmd.PrintErrln("failed to stop servers:", err) - } - }() - - if err := server.Start(ctx); err != nil { - return err - } - - return nil - }, - } - - // add the start flags to the command - for _, startFlags := range flags { - cmd.Flags().AddFlagSet(startFlags) - } - - return cmd -} - -// configHandle writes the default config to the home directory if it does not exist and sets the server context -func configHandle[T transaction.Tx](s *Server[T], cmd *cobra.Command) error { - home, err := cmd.Flags().GetString(FlagHome) - if err != nil { - return err - } - - configDir := filepath.Join(home, "config") - - // we need to check app.toml as the config folder can already exist for the client.toml - if _, err := os.Stat(filepath.Join(configDir, "app.toml")); os.IsNotExist(err) { - if err = s.WriteConfig(configDir); err != nil { - return err - } - } - - v, err := ReadConfig(configDir) - if err != nil { - return err - } - - if err := v.BindPFlags(cmd.Flags()); err != nil { - return err - } - - log, err := NewLogger(v, cmd.OutOrStdout()) - if err != nil { - return err - } - - return SetCmdServerContext(cmd, v, log) -} - -// findSubCommand finds a sub-command of the provided command whose Use -// string is or begins with the provided subCmdName. -// It verifies the command's aliases as well. -func findSubCommand(cmd *cobra.Command, subCmdName string) *cobra.Command { - for _, subCmd := range cmd.Commands() { - use := subCmd.Use - if use == subCmdName || strings.HasPrefix(use, subCmdName+" ") { - return subCmd - } - - for _, alias := range subCmd.Aliases { - if alias == subCmdName || strings.HasPrefix(alias, subCmdName+" ") { - return subCmd - } - } - } - return nil -} - -// topLevelCmd creates a new top-level command with the provided name and -// description. The command will have DisableFlagParsing set to false and -// SuggestionsMinimumDistance set to 2. 
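
Note: from the consumer side, the removed Execute/AddCommands pair above was the whole CLI entry point. A sketch, assuming the extracted server/v2 module keeps this surface; the binary name, env prefix and home directory are hypothetical.

package clisketch

import (
	"os"

	"github.com/spf13/cobra"

	"cosmossdk.io/core/transaction"
	"cosmossdk.io/log"
	serverv2 "cosmossdk.io/server/v2"
)

func run[T transaction.Tx](newApp serverv2.AppCreator[T], components ...serverv2.ServerComponent[T]) error {
	rootCmd := &cobra.Command{Use: "simd"}
	logger := log.NewLogger(os.Stdout)

	// AddCommands wires the start command plus each component's CLI,
	// query and tx commands under the root.
	if err := serverv2.AddCommands(rootCmd, newApp, logger, components...); err != nil {
		return err
	}

	// Execute installs the log/home persistent flags, binds the SIMD_*
	// environment variables, and runs the root command.
	return serverv2.Execute(rootCmd, "SIMD", ".simd")
}
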
-func topLevelCmd(ctx context.Context, use, short string) *cobra.Command { - cmd := &cobra.Command{ - Use: use, - Short: short, - DisableFlagParsing: false, - SuggestionsMinimumDistance: 2, - } - cmd.SetContext(ctx) - - return cmd -} diff --git a/server/v2/config.go b/server/v2/config.go deleted file mode 100644 index 57cce302bd..0000000000 --- a/server/v2/config.go +++ /dev/null @@ -1,61 +0,0 @@ -package serverv2 - -import ( - "fmt" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" -) - -// ReadConfig returns a viper instance of the config file -func ReadConfig(configPath string) (*viper.Viper, error) { - v := viper.New() - v.SetConfigType("toml") - v.SetConfigName("config") - v.AddConfigPath(configPath) - if err := v.ReadInConfig(); err != nil { - return nil, fmt.Errorf("failed to read config: %s: %w", configPath, err) - } - - v.SetConfigName("app") - if err := v.MergeInConfig(); err != nil { - return nil, fmt.Errorf("failed to merge configuration: %w", err) - } - - v.WatchConfig() - - return v, nil -} - -// UnmarshalSubConfig unmarshals the given subconfig from the viper instance. -// It unmarshals the config, env, flags into the target struct. -// Use this instead of viper.Sub because viper does not unmarshal flags. -func UnmarshalSubConfig(v *viper.Viper, subName string, target any) error { - var sub any - for k, val := range v.AllSettings() { - if strings.HasPrefix(k, subName) { - sub = val - } - } - - // Create a new decoder with custom decoding options - decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), - Result: target, - WeaklyTypedInput: true, - }) - if err != nil { - return fmt.Errorf("failed to create decoder: %w", err) - } - - // Decode the sub-configuration - if err := decoder.Decode(sub); err != nil { - return fmt.Errorf("failed to decode sub-configuration: %w", err) - } - - return nil -} diff --git a/server/v2/config_test.go b/server/v2/config_test.go deleted file mode 100644 index 24eb28bb75..0000000000 --- a/server/v2/config_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package serverv2_test - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - - serverv2 "cosmossdk.io/server/v2" - grpc "cosmossdk.io/server/v2/api/grpc" -) - -func TestReadConfig(t *testing.T) { - currentDir, err := os.Getwd() - require.NoError(t, err) - configPath := filepath.Join(currentDir, "testdata") - - v, err := serverv2.ReadConfig(configPath) - require.NoError(t, err) - - require.Equal(t, v.GetString("grpc.address"), grpc.DefaultConfig().Address) -} - -func TestUnmarshalSubConfig(t *testing.T) { - currentDir, err := os.Getwd() - require.NoError(t, err) - configPath := filepath.Join(currentDir, "testdata") - - v, err := serverv2.ReadConfig(configPath) - require.NoError(t, err) - - grpcConfig := grpc.DefaultConfig() - err = serverv2.UnmarshalSubConfig(v, "grpc", &grpcConfig) - require.NoError(t, err) - - require.True(t, grpc.DefaultConfig().Enable) - require.False(t, grpcConfig.Enable) -} diff --git a/server/v2/flags.go b/server/v2/flags.go deleted file mode 100644 index a86154eb76..0000000000 --- a/server/v2/flags.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package serverv2 defines constants for server configuration flags and output formats. -package serverv2 - -const ( - // FlagHome specifies the home directory flag. 
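
Note: ReadConfig above merges config.toml and app.toml from one directory into a single viper instance, and UnmarshalSubConfig decodes one subtree of it; the removed config_test.go exercises exactly this pair. A sketch mirroring the test, assuming the grpc server package keeps its DefaultConfig.

package configsketch

import (
	serverv2 "cosmossdk.io/server/v2"
	grpc "cosmossdk.io/server/v2/api/grpc"
)

// grpcAddress loads the merged configuration from configDir and decodes
// the "grpc" subtree over the package defaults, exactly as the deleted
// test does.
func grpcAddress(configDir string) (string, error) {
	v, err := serverv2.ReadConfig(configDir)
	if err != nil {
		return "", err
	}

	cfg := grpc.DefaultConfig()
	if err := serverv2.UnmarshalSubConfig(v, "grpc", &cfg); err != nil {
		return "", err
	}
	return cfg.Address, nil
}
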
- FlagHome = "home" - - FlagLogLevel = "log_level" // Sets the logging level - FlagLogFormat = "log_format" // Specifies the log output format - FlagLogNoColor = "log_no_color" // Disables colored log output - FlagTrace = "trace" // Enables trace-level logging - - // OutputFormatJSON defines the JSON output format option. - OutputFormatJSON = "json" -) diff --git a/server/v2/go.mod b/server/v2/go.mod deleted file mode 100644 index 430030ba80..0000000000 --- a/server/v2/go.mod +++ /dev/null @@ -1,88 +0,0 @@ -module cosmossdk.io/server/v2 - -go 1.21 - -replace ( - cosmossdk.io/api => ../../api - cosmossdk.io/core => ../../core - cosmossdk.io/core/testing => ../../core/testing - cosmossdk.io/server/v2/appmanager => ./appmanager - cosmossdk.io/server/v2/stf => ./stf - cosmossdk.io/x/tx => ../../x/tx -) - -require ( - cosmossdk.io/api v0.7.5 - cosmossdk.io/core v0.12.1-0.20231114100755-569e3ff6a0d7 - cosmossdk.io/core/testing v0.0.0-00010101000000-000000000000 - cosmossdk.io/log v1.3.1 - cosmossdk.io/server/v2/appmanager v0.0.0-00010101000000-000000000000 - github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/gogogateway v1.2.0 - github.com/cosmos/gogoproto v1.5.0 - github.com/golang/protobuf v1.5.4 - github.com/gorilla/mux v1.8.1 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/go-hclog v1.6.2 - github.com/hashicorp/go-metrics v0.5.3 - github.com/hashicorp/go-plugin v1.6.0 - github.com/mitchellh/mapstructure v1.5.0 - github.com/pelletier/go-toml/v2 v2.2.2 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 - github.com/rs/zerolog v1.33.0 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.9.0 - golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc - golang.org/x/sync v0.7.0 - google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.34.2 -) - -require ( - github.com/DataDog/datadog-go v3.2.0+incompatible // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gogo/googleapis v1.4.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jhump/protoreflect v1.15.3 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - 
github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/tidwall/btree v1.7.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/server/v2/go.sum b/server/v2/go.sum deleted file mode 100644 index 6cb26cc434..0000000000 --- a/server/v2/go.sum +++ /dev/null @@ -1,397 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= -cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go 
v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= -github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= -github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= -github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= -github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= -github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= 
-github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= -github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc h1:O9NuF4s+E/PvMIy+9IUZB9znFwUIXEWSstNjek6VpVg= -golang.org/x/exp v0.0.0-20240531132922-fd00a4e0eefc/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 
h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
-gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/server/v2/logger.go b/server/v2/logger.go
deleted file mode 100644
index 51a64fdccd..0000000000
--- a/server/v2/logger.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package serverv2
-
-import (
-	"io"
-
-	"github.com/rs/zerolog"
-	"github.com/spf13/viper"
-
-	"cosmossdk.io/log"
-)
-
-// NewLogger creates the default SDK logger.
-// It reads the log level and format from the server context.
-func NewLogger(v *viper.Viper, out io.Writer) (log.Logger, error) {
-	var opts []log.Option
-	if v.GetString(FlagLogFormat) == OutputFormatJSON {
-		opts = append(opts, log.OutputJSONOption())
-	}
-	opts = append(opts,
-		log.ColorOption(!v.GetBool(FlagLogNoColor)),
-		log.TraceOption(v.GetBool(FlagTrace)))
-
-	// check and set filter level or keys for the logger if any
-	logLvlStr := v.GetString(FlagLogLevel)
-	if logLvlStr == "" {
-		return log.NewLogger(out, opts...), nil
-	}
-
-	logLvl, err := zerolog.ParseLevel(logLvlStr)
-	switch {
-	case err != nil:
-		// If the log level is not a valid zerolog level, then we try to parse it as a key filter.
-		filterFunc, err := log.ParseLogLevel(logLvlStr)
-		if err != nil {
-			return nil, err
-		}
-
-		opts = append(opts, log.FilterOption(filterFunc))
-	default:
-		opts = append(opts, log.LevelOption(logLvl))
-	}
-
-	return log.NewLogger(out, opts...), nil
-}
diff --git a/server/v2/server.go b/server/v2/server.go
deleted file mode 100644
index 6e1a4e7114..0000000000
--- a/server/v2/server.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package serverv2
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/pelletier/go-toml/v2"
-	"github.com/spf13/cobra"
-	"github.com/spf13/pflag"
-	"github.com/spf13/viper"
-	"golang.org/x/sync/errgroup"
-
-	"cosmossdk.io/core/transaction"
-	"cosmossdk.io/log"
-)
-
-// ServerComponent is a server module that can be started and stopped.
-type ServerComponent[T transaction.Tx] interface {
-	Name() string
-
-	Start(context.Context) error
-	Stop(context.Context) error
-	Init(AppI[T], *viper.Viper, log.Logger) error
-}
-
-// HasStartFlags is a server module that has start flags.
-type HasStartFlags interface {
-	// StartCmdFlags returns server start flags.
-	// Those flags should be prefixed with the server name.
-	// They are then merged with the server config in one viper instance.
-	StartCmdFlags() *pflag.FlagSet
-}
-
-// HasConfig is a server module that has a config.
-type HasConfig interface {
-	Config() any
-}
-
-// HasCLICommands is a server module that has CLI commands.
-type HasCLICommands interface {
-	CLICommands() CLIConfig
-}
-
-// CLIConfig defines the CLI configuration for a module server.
-type CLIConfig struct {
-	// Commands defines the main command of a module server.
-	Commands []*cobra.Command
-	// Queries defines the query commands of a module server.
-	// Those commands are meant to be added in the root query command.
-	Queries []*cobra.Command
-	// Txs defines the tx commands of a module server.
-	// Those commands are meant to be added in the root tx command.
- Txs []*cobra.Command -} - -var _ ServerComponent[transaction.Tx] = (*Server[transaction.Tx])(nil) - -type Server[T transaction.Tx] struct { - logger log.Logger - components []ServerComponent[T] -} - -func NewServer[T transaction.Tx]( - logger log.Logger, - components ...ServerComponent[T], -) *Server[T] { - return &Server[T]{ - logger: logger, - components: components, - } -} - -func (s *Server[T]) Name() string { - return "server" -} - -// Start starts all components concurrently. -func (s *Server[T]) Start(ctx context.Context) error { - s.logger.Info("starting servers...") - - g, ctx := errgroup.WithContext(ctx) - for _, mod := range s.components { - mod := mod - g.Go(func() error { - return mod.Start(ctx) - }) - } - - if err := g.Wait(); err != nil { - return fmt.Errorf("failed to start servers: %w", err) - } - - <-ctx.Done() - - return nil -} - -// Stop stops all components concurrently. -func (s *Server[T]) Stop(ctx context.Context) error { - s.logger.Info("stopping servers...") - - g, ctx := errgroup.WithContext(ctx) - for _, mod := range s.components { - mod := mod - g.Go(func() error { - return mod.Stop(ctx) - }) - } - - return g.Wait() -} - -// CLICommands returns all CLI commands of all components. -func (s *Server[T]) CLICommands() CLIConfig { - compart := func(name string, cmds ...*cobra.Command) *cobra.Command { - if len(cmds) == 1 && strings.HasPrefix(cmds[0].Use, name) { - return cmds[0] - } - - subCmd := &cobra.Command{ - Use: name, - Short: fmt.Sprintf("Commands from the %s server component", name), - } - subCmd.AddCommand(cmds...) - - return subCmd - } - - commands := CLIConfig{} - for _, mod := range s.components { - if climod, ok := mod.(HasCLICommands); ok { - srvCmd := climod.CLICommands() - - if len(srvCmd.Commands) > 0 { - commands.Commands = append(commands.Commands, compart(mod.Name(), srvCmd.Commands...)) - } - - if len(srvCmd.Txs) > 0 { - commands.Txs = append(commands.Txs, compart(mod.Name(), srvCmd.Txs...)) - } - - if len(srvCmd.Queries) > 0 { - commands.Queries = append(commands.Queries, compart(mod.Name(), srvCmd.Queries...)) - } - } - } - - return commands -} - -// Configs returns all configs of all server components. -func (s *Server[T]) Configs() map[string]any { - cfgs := make(map[string]any) - for _, mod := range s.components { - if configmod, ok := mod.(HasConfig); ok { - cfg := configmod.Config() - cfgs[mod.Name()] = cfg - } - } - - return cfgs -} - -// Init initializes all server components with the provided application, configuration, and logger. -// It returns an error if any component fails to initialize. -func (s *Server[T]) Init(appI AppI[T], v *viper.Viper, logger log.Logger) error { - var components []ServerComponent[T] - for _, mod := range s.components { - mod := mod - if err := mod.Init(appI, v, logger); err != nil { - return err - } - - components = append(components, mod) - } - - s.components = components - return nil -} - -// WriteConfig writes the config to the given path. -// Note: it does not use viper.WriteConfigAs because we do not want to store flag values in the config. 
-func (s *Server[T]) WriteConfig(configPath string) error { - cfgs := s.Configs() - b, err := toml.Marshal(cfgs) - if err != nil { - return err - } - - if _, err := os.Stat(configPath); os.IsNotExist(err) { - if err := os.MkdirAll(configPath, os.ModePerm); err != nil { - return err - } - } - - if err := os.WriteFile(filepath.Join(configPath, "app.toml"), b, 0o600); err != nil { - return fmt.Errorf("failed to write config: %w", err) - } - - for _, component := range s.components { - // undocumented interface to write the component default config in another file than app.toml - // it is used by cometbft for backward compatibility - // it should not be used by other components - if mod, ok := component.(interface{ WriteCustomConfigAt(string) error }); ok { - if err := mod.WriteCustomConfigAt(configPath); err != nil { - return err - } - } - } - - return nil -} - -// StartFlags returns all flags of all server components. -func (s *Server[T]) StartFlags() []*pflag.FlagSet { - flags := []*pflag.FlagSet{} - for _, mod := range s.components { - if startmod, ok := mod.(HasStartFlags); ok { - flags = append(flags, startmod.StartCmdFlags()) - } - } - - return flags -} diff --git a/server/v2/server_mock_test.go b/server/v2/server_mock_test.go deleted file mode 100644 index ae238d66a5..0000000000 --- a/server/v2/server_mock_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package serverv2_test - -import ( - "context" - "fmt" - "math/rand" - - "github.com/spf13/viper" - - "cosmossdk.io/core/transaction" - "cosmossdk.io/log" - serverv2 "cosmossdk.io/server/v2" -) - -type mockServerConfig struct { - MockFieldOne string `mapstructure:"mock_field" toml:"mock_field" comment:"Mock field"` - MockFieldTwo int `mapstructure:"mock_field_two" toml:"mock_field_two" comment:"Mock field two"` -} - -func MockServerDefaultConfig() *mockServerConfig { - return &mockServerConfig{ - MockFieldOne: "default", - MockFieldTwo: 1, - } -} - -type mockServer struct { - name string - ch chan string -} - -func (s *mockServer) Name() string { - return s.name -} - -func (s *mockServer) Init(appI serverv2.AppI[transaction.Tx], v *viper.Viper, logger log.Logger) error { - return nil -} - -func (s *mockServer) Start(ctx context.Context) error { - for ctx.Err() == nil { - s.ch <- fmt.Sprintf("%s mock server: %d", s.name, rand.Int()) - } - - return nil -} - -func (s *mockServer) Stop(ctx context.Context) error { - for range s.ch { - if str := <-s.ch; str != "" { - fmt.Printf("clearing %s\n", str) - } - } - - return nil -} - -func (s *mockServer) Config() any { - return MockServerDefaultConfig() -} diff --git a/server/v2/server_test.go b/server/v2/server_test.go deleted file mode 100644 index e757e7ecd5..0000000000 --- a/server/v2/server_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package serverv2_test - -import ( - "context" - "os" - "path/filepath" - "testing" - "time" - - gogoproto "github.com/cosmos/gogoproto/proto" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - - coreapp "cosmossdk.io/core/app" - "cosmossdk.io/core/transaction" - "cosmossdk.io/log" - serverv2 "cosmossdk.io/server/v2" - grpc "cosmossdk.io/server/v2/api/grpc" - "cosmossdk.io/server/v2/appmanager" -) - -type mockInterfaceRegistry struct{} - -func (*mockInterfaceRegistry) Resolve(typeUrl string) (gogoproto.Message, error) { - panic("not implemented") -} - -func (*mockInterfaceRegistry) ListImplementations(ifaceTypeURL string) []string { - panic("not implemented") -} -func (*mockInterfaceRegistry) ListAllInterfaces() []string { panic("not implemented") } - -type 
mockApp[T transaction.Tx] struct {
-	serverv2.AppI[T]
-}
-
-func (*mockApp[T]) GetGPRCMethodsToMessageMap() map[string]func() gogoproto.Message {
-	return map[string]func() gogoproto.Message{}
-}
-
-func (*mockApp[T]) GetAppManager() *appmanager.AppManager[T] {
-	return nil
-}
-
-func (*mockApp[T]) InterfaceRegistry() coreapp.InterfaceRegistry {
-	return &mockInterfaceRegistry{}
-}
-
-func TestServer(t *testing.T) {
-	currentDir, err := os.Getwd()
-	require.NoError(t, err)
-	configPath := filepath.Join(currentDir, "testdata")
-
-	v, err := serverv2.ReadConfig(configPath)
-	if err != nil {
-		v = viper.New()
-	}
-
-	logger := log.NewLogger(os.Stdout)
-	grpcServer := grpc.New[transaction.Tx]()
-	err = grpcServer.Init(&mockApp[transaction.Tx]{}, v, logger)
-	require.NoError(t, err)
-
-	mockServer := &mockServer{name: "mock-server-1", ch: make(chan string, 100)}
-
-	server := serverv2.NewServer(
-		logger,
-		grpcServer,
-		mockServer,
-	)
-
-	serverCfgs := server.Configs()
-	require.Equal(t, serverCfgs[grpcServer.Name()].(*grpc.Config).Address, grpc.DefaultConfig().Address)
-	require.Equal(t, serverCfgs[mockServer.Name()].(*mockServerConfig).MockFieldOne, MockServerDefaultConfig().MockFieldOne)
-
-	// write config
-	err = server.WriteConfig(configPath)
-	require.NoError(t, err)
-
-	v, err = serverv2.ReadConfig(configPath)
-	require.NoError(t, err)
-
-	require.Equal(t, v.GetString(grpcServer.Name()+".address"), grpc.DefaultConfig().Address)
-
-	// start empty
-	ctx, cancelFn := context.WithCancel(context.TODO())
-	go func() {
-		// wait 5sec and cancel context
-		<-time.After(5 * time.Second)
-		cancelFn()
-
-		err = server.Stop(ctx)
-		require.NoError(t, err)
-	}()
-
-	err = server.Start(ctx)
-	require.NoError(t, err)
-}
diff --git a/server/v2/stf/README.md b/server/v2/stf/README.md
deleted file mode 100644
index 48c40ca75b..0000000000
--- a/server/v2/stf/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# State Transition Function (STF)
-
-STF is a function that takes a state and an action as input and returns the next state. It assumes neither the execution model of the application nor its consensus.
-
-
-The state transition function receives a read-only instance of state. It does not write directly to disk; instead, it returns the state changes that have occurred within the application. The state transition function is deterministic, meaning that given the same input, it will always produce the same output.
-
-## BranchDB
-
-BranchDB is a cache of all the reads done within a block, simulation, or transaction validation. It takes a read-only instance of state and creates its own write instance using a btree. After all state transitions are done, the new change sets are returned to the caller.
-
-The BranchDB can be replaced and optimized for specific use cases. The implementation is as follows:
-
-```go
-	type branchdb func(state store.ReaderMap) store.WriterMap
-```
-
-## GasMeter
-
-GasMeter is a utility that keeps track of the gas consumed by the state transition function. It is used to limit the amount of computation that can be done within a block.
-
-The GasMeter can be replaced and optimized for specific use cases. The implementation is as follows:
-
-```go
-type (
-	// gasMeter is a function type that takes a gas limit as input and returns a gas.Meter.
-	// It is used to measure and limit the amount of gas consumed during the execution of a function.
-	gasMeter func(gasLimit uint64) gas.Meter
-
-	// wrapGasMeter is a function type that wraps a gas meter and a store writer map.
-	wrapGasMeter func(meter gas.Meter, store store.WriterMap) store.WriterMap
-)
-```
-
-The wrapGasMeter is used to consume gas. Application developers can seamlessly replace the gas meter with their own implementation to customize gas consumption.
diff --git a/server/v2/stf/branch/branch_test.go b/server/v2/stf/branch/branch_test.go
deleted file mode 100644
index e306a2cadf..0000000000
--- a/server/v2/stf/branch/branch_test.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package branch
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-	"github.com/tidwall/btree"
-
-	"cosmossdk.io/core/store"
-)
-
-func TestBranch(t *testing.T) {
-	set := func(s interface{ Set([]byte, []byte) error }, key, value string) {
-		require.NoError(t, s.Set([]byte(key), []byte(value)))
-	}
-	get := func(s interface{ Get([]byte) ([]byte, error) }, key, wantValue string) {
-		value, err := s.Get([]byte(key))
-		require.NoError(t, err)
-		if wantValue == "" {
-			require.Nil(t, value)
-		} else {
-			require.Equal(t, wantValue, string(value))
-		}
-	}
-
-	remove := func(s interface{ Delete([]byte) error }, key string) {
-		err := s.Delete([]byte(key))
-		require.NoError(t, err)
-	}
-
-	iter := func(s interface {
-		Iterator(start, end []byte) (store.Iterator, error)
-	}, start, end string, wantPairs [][2]string,
-	) {
-		startKey := []byte(start)
-		endKey := []byte(end)
-		if start == "" {
-			startKey = nil
-		}
-		if end == "" {
-			endKey = nil
-		}
-		iter, err := s.Iterator(startKey, endKey)
-		require.NoError(t, err)
-		defer iter.Close()
-		numPairs := len(wantPairs)
-		for i := 0; i < numPairs; i++ {
-			require.True(t, iter.Valid(), "expected iterator to be valid")
-			gotKey, gotValue := string(iter.Key()), string(iter.Value())
-			wantKey, wantValue := wantPairs[i][0], wantPairs[i][1]
-			require.Equal(t, wantKey, gotKey)
-			require.Equal(t, wantValue, gotValue)
-			iter.Next()
-		}
-	}
-
-	parent := newMemState()
-
-	// populate parent with some state
-	set(parent, "1", "a")
-	set(parent, "2", "b")
-	set(parent, "3", "c")
-	set(parent, "4", "d")
-
-	branch := NewStore(parent)
-
-	get(branch, "1", "a") // gets from parent
-
-	set(branch, "1", "z")
-	get(branch, "1", "z") // gets updated value from branch
-
-	set(branch, "5", "e")
-	get(branch, "5", "e") // gets updated value from branch
-
-	remove(branch, "3")
-	get(branch, "3", "") // it's not fetched even if it exists in parent; it's not part of the branch changeset currently.
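To make the STF README's two extension points concrete, here is a minimal hypothetical sketch (not part of the deleted sources) of how a branch function (the README's `branchdb`) and a gas-meter wrapper (`wrapGasMeter`) compose around read-only state. The `runWithBranch` helper and its parameters are illustrative, and it assumes the core `store.WriterMap` interface exposes `GetStateChanges`, as the deleted branch code suggests:

```go
package sketch

import (
	"cosmossdk.io/core/gas"
	"cosmossdk.io/core/store"
)

// runWithBranch branches read-only state, meters every write, runs exec,
// and returns the buffered change sets instead of writing to disk.
func runWithBranch(
	state store.ReaderMap,
	branch func(store.ReaderMap) store.WriterMap, // the README's branchdb
	wrap func(gas.Meter, store.WriterMap) store.WriterMap, // the README's wrapGasMeter
	meter gas.Meter,
	exec func(store.WriterMap) error,
) ([]store.StateChanges, error) {
	writable := wrap(meter, branch(state)) // writes are buffered and metered
	if err := exec(writable); err != nil {
		return nil, err // on failure the branch is simply discarded
	}
	return writable.GetStateChanges() // change sets go back to the caller
}
```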
- - set(branch, "6", "f") - remove(branch, "6") - get(branch, "6", "") // inserted and then removed from branch - - // test iter - iter( - branch, - "", "", - [][2]string{ - {"1", "z"}, - {"2", "b"}, - {"4", "d"}, - {"5", "e"}, - }, - ) - - // test iter in range - iter( - branch, - "2", "4", - [][2]string{ - {"2", "b"}, - }, - ) - - // test reverse iter -} - -func newMemState() memStore { - return memStore{btree.NewBTreeGOptions(byKeys, btree.Options{Degree: bTreeDegree, NoLocks: true})} -} - -var _ store.Writer = memStore{} - -type memStore struct { - t *btree.BTreeG[item] -} - -func (m memStore) Set(key, value []byte) error { - m.t.Set(item{key: key, value: value}) - return nil -} - -func (m memStore) Delete(key []byte) error { - m.t.Delete(item{key: key}) - return nil -} - -func (m memStore) ApplyChangeSets(changes []store.KVPair) error { - panic("not callable") -} - -func (m memStore) ChangeSets() ([]store.KVPair, error) { panic("not callable") } - -func (m memStore) Has(key []byte) (bool, error) { - _, found := m.t.Get(item{key: key}) - return found, nil -} - -func (m memStore) Get(bytes []byte) ([]byte, error) { - v, found := m.t.Get(item{key: bytes}) - if !found { - return nil, nil - } - return v.value, nil -} - -func (m memStore) Iterator(start, end []byte) (store.Iterator, error) { - return newMemIterator(start, end, m.t, true), nil -} - -func (m memStore) ReverseIterator(start, end []byte) (store.Iterator, error) { - return newMemIterator(start, end, m.t, false), nil -} diff --git a/server/v2/stf/branch/changeset.go b/server/v2/stf/branch/changeset.go deleted file mode 100644 index 13c0167251..0000000000 --- a/server/v2/stf/branch/changeset.go +++ /dev/null @@ -1,230 +0,0 @@ -package branch - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/core/store" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// changeSet implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type changeSet struct { - tree *btree.BTreeG[item] -} - -// newChangeSet creates a wrapper around `btree.BTreeG`. -func newChangeSet() changeSet { - return changeSet{ - tree: btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: true, - }), - } -} - -// set adds a new key-value pair to the change set's tree. -func (bt changeSet) set(key, value []byte) { - bt.tree.Set(newItem(key, value)) -} - -// get retrieves the value associated with the given key from the changeSet's tree. -func (bt changeSet) get(key []byte) (value []byte, found bool) { - it, found := bt.tree.Get(item{key: key}) - return it.value, found -} - -// delete removes the value associated with the given key from the change set. -// If the key does not exist in the change set, this method does nothing. -func (bt changeSet) delete(key []byte) { - bt.set(key, nil) -} - -// iterator returns a new iterator over the key-value pairs in the changeSet -// that have keys greater than or equal to the start key and less than the end key. 
-func (bt changeSet) iterator(start, end []byte) (store.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt.tree, true), nil -} - -// reverseIterator returns a new iterator that iterates over the key-value pairs in reverse order -// within the specified range [start, end) in the changeSet's tree. -// If start or end is an empty byte slice, it returns an error indicating that the key is empty. -func (bt changeSet) reverseIterator(start, end []byte) (store.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt.tree, false), nil -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key, value []byte) item { - return item{key: key, value: value} -} - -// memIterator iterates over iterKVCache items. -// if value is nil, means it was deleted. -// Implements Iterator. -type memIterator struct { - iter btree.IterG[item] - - start []byte - end []byte - ascending bool - valid bool -} - -// newMemIterator creates a new memory iterator for a given range of keys in a B-tree. -// The iterator starts at the specified start key and ends at the specified end key. -// The `tree` parameter is the B-tree to iterate over. -// The `ascending` parameter determines the direction of iteration. -// If `ascending` is true, the iterator will iterate in ascending order. -// If `ascending` is false, the iterator will iterate in descending order. -// The returned iterator is positioned at the first key that is greater than or equal to the start key. -// If the start key is nil, the iterator is positioned at the first key in the B-tree. -// If the end key is nil, the iterator is positioned at the last key in the B-tree. -// The iterator is inclusive of the start key and exclusive of the end key. -// The `valid` field of the iterator indicates whether the iterator is positioned at a valid key. -// The `start` and `end` fields of the iterator store the start and end keys respectively. -func newMemIterator(start, end []byte, tree *btree.BTreeG[item], ascending bool) *memIterator { - iter := tree.Iter() - var valid bool - if ascending { - if start != nil { - valid = iter.Seek(newItem(start, nil)) - } else { - valid = iter.First() - } - } else { - if end != nil { - valid = iter.Seek(newItem(end, nil)) - if !valid { - valid = iter.Last() - } else { - // end is exclusive - valid = iter.Prev() - } - } else { - valid = iter.Last() - } - } - - mi := &memIterator{ - iter: iter, - start: start, - end: end, - ascending: ascending, - valid: valid, - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } - - return mi -} - -// Domain returns the start and end keys of the iterator's domain. -func (mi *memIterator) Domain() (start, end []byte) { - return mi.start, mi.end -} - -// Close releases any resources held by the iterator. -func (mi *memIterator) Close() error { - mi.iter.Release() - return nil -} - -// Error returns the error state of the iterator. -// If the iterator is not valid, it returns the errInvalidIterator error. -// Otherwise, it returns nil. 
-func (mi *memIterator) Error() error { - if !mi.Valid() { - return errInvalidIterator - } - return nil -} - -// Valid returns whether the iterator is currently pointing to a valid entry. -// It returns true if the iterator is valid, and false otherwise. -func (mi *memIterator) Valid() bool { - return mi.valid -} - -// Next advances the iterator to the next key-value pair. -// If the iterator is in ascending order, it moves to the next key-value pair. -// If the iterator is in descending order, it moves to the previous key-value pair. -// It also checks if the new key-value pair is within the specified range. -func (mi *memIterator) Next() { - mi.assertValid() - - if mi.ascending { - mi.valid = mi.iter.Next() - } else { - mi.valid = mi.iter.Prev() - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } -} - -// keyInRange checks if the given key is within the range defined by the iterator. -// If the iterator is in ascending order and the end key is not nil, it returns false -// if the key is greater than or equal to the end key. -// If the iterator is in descending order and the start key is not nil, it returns false -// if the key is less than the start key. -// Otherwise, it returns true. -func (mi *memIterator) keyInRange(key []byte) bool { - if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { - return false - } - if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { - return false - } - return true -} - -// Key returns the key of the current item in the iterator. -func (mi *memIterator) Key() []byte { - return mi.iter.Item().key -} - -// Value returns the value of the current item in the iterator. -func (mi *memIterator) Value() []byte { - return mi.iter.Item().value -} - -// assertValid checks if the memIterator is in a valid state. -// If there is an error, it panics with the error message. -func (mi *memIterator) assertValid() { - if err := mi.Error(); err != nil { - panic(err) - } -} diff --git a/server/v2/stf/branch/defaults.go b/server/v2/stf/branch/defaults.go deleted file mode 100644 index 19f68933f9..0000000000 --- a/server/v2/stf/branch/defaults.go +++ /dev/null @@ -1,9 +0,0 @@ -package branch - -import "cosmossdk.io/core/store" - -func DefaultNewWriterMap(r store.ReaderMap) store.WriterMap { - return NewWriterMap(r, func(readonlyState store.Reader) store.Writer { - return NewStore(readonlyState) - }) -} diff --git a/server/v2/stf/branch/doc.go b/server/v2/stf/branch/doc.go deleted file mode 100644 index 9fc02d7261..0000000000 --- a/server/v2/stf/branch/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package branch defines a Store that can be used to wrap readable state to make it writable. -// Code heavily taken and adapted from cosmossdk.io/store/v1. -package branch diff --git a/server/v2/stf/branch/mergeiter.go b/server/v2/stf/branch/mergeiter.go deleted file mode 100644 index e71b88cffc..0000000000 --- a/server/v2/stf/branch/mergeiter.go +++ /dev/null @@ -1,235 +0,0 @@ -package branch - -import ( - "bytes" - "errors" - - corestore "cosmossdk.io/core/store" -) - -// mergedIterator merges a parent Iterator and a cache Iterator. -// The cache iterator may return nil keys to signal that an item -// had been deleted (but not deleted in the parent). -// If the cache iterator has the same key as the parent, the -// cache shadows (overrides) the parent. 
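As a minimal illustration of the shadowing rule just described, the lookup for a single key reduces to the following hypothetical helper (not part of the deleted file):

```go
// resolve applies the merge rule for one key: a cache entry, when present,
// shadows the parent, and a nil cache value marks a deletion.
func resolve(parentVal, cacheVal []byte, cacheHas bool) (value []byte, exists bool) {
	if !cacheHas {
		return parentVal, parentVal != nil // no cache entry: fall through to parent
	}
	if cacheVal == nil {
		return nil, false // a deletion in the cache hides the parent's value
	}
	return cacheVal, true // the cache overrides the parent
}
```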
-type mergedIterator struct { - parent corestore.Iterator - cache corestore.Iterator - ascending bool - - valid bool -} - -var _ corestore.Iterator = (*mergedIterator)(nil) - -// mergeIterators merges two iterators. -func mergeIterators(parent, cache corestore.Iterator, ascending bool) corestore.Iterator { - iter := &mergedIterator{ - parent: parent, - cache: cache, - ascending: ascending, - } - - iter.valid = iter.skipUntilExistsOrInvalid() - return iter -} - -// Domain implements Iterator. -// Returns parent domain because cache and parent domains are the same. -func (iter *mergedIterator) Domain() (start, end []byte) { - return iter.parent.Domain() -} - -// Valid implements Iterator. -func (iter *mergedIterator) Valid() bool { - return iter.valid -} - -// Next implements Iterator -func (iter *mergedIterator) Next() { - iter.assertValid() - - switch { - case !iter.parent.Valid(): - // If parent is invalid, get the next cache item. - iter.cache.Next() - case !iter.cache.Valid(): - // If cache is invalid, get the next parent item. - iter.parent.Next() - default: - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - switch iter.compare(keyP, keyC) { - case -1: // parent < cache - iter.parent.Next() - case 0: // parent == cache - iter.parent.Next() - iter.cache.Next() - case 1: // parent > cache - iter.cache.Next() - } - } - iter.valid = iter.skipUntilExistsOrInvalid() -} - -// Key implements Iterator -func (iter *mergedIterator) Key() []byte { - iter.assertValid() - - // If parent is invalid, get the cache key. - if !iter.parent.Valid() { - return iter.cache.Key() - } - - // If cache is invalid, get the parent key. - if !iter.cache.Valid() { - return iter.parent.Key() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return keyP - case 0: // parent == cache - return keyP - case 1: // parent > cache - return keyC - default: - panic("invalid compare result") - } -} - -// Value implements Iterator -func (iter *mergedIterator) Value() []byte { - iter.assertValid() - - // If parent is invalid, get the cache value. - if !iter.parent.Valid() { - return iter.cache.Value() - } - - // If cache is invalid, get the parent value. - if !iter.cache.Valid() { - return iter.parent.Value() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return iter.parent.Value() - case 0: // parent == cache - return iter.cache.Value() - case 1: // parent > cache - return iter.cache.Value() - default: - panic("invalid comparison result") - } -} - -// Close implements Iterator -func (iter *mergedIterator) Close() error { - err1 := iter.cache.Close() - if err := iter.parent.Close(); err != nil { - return err - } - - return err1 -} - -var errInvalidIterator = errors.New("invalid merged iterator") - -// Error returns an error if the mergedIterator is invalid defined by the -// Valid method. -func (iter *mergedIterator) Error() error { - if !iter.Valid() { - return errInvalidIterator - } - - return nil -} - -// If not valid, panics. -// NOTE: May have side-effect of iterating over cache. -func (iter *mergedIterator) assertValid() { - if err := iter.Error(); err != nil { - panic(err) - } -} - -// Like bytes.Compare but opposite if not ascending. 
-func (iter *mergedIterator) compare(a, b []byte) int { - if iter.ascending { - return bytes.Compare(a, b) - } - - return bytes.Compare(a, b) * -1 -} - -// Skip all delete-items from the cache w/ `key < until`. After this function, -// current cache item is a non-delete-item, or `until <= key`. -// If the current cache item is not a delete item, does nothing. -// If `until` is nil, there is no limit, and cache may end up invalid. -// CONTRACT: cache is valid. -func (iter *mergedIterator) skipCacheDeletes(until []byte) { - for iter.cache.Valid() && - iter.cache.Value() == nil && - (until == nil || iter.compare(iter.cache.Key(), until) < 0) { - iter.cache.Next() - } -} - -// Fast forwards cache (or parent+cache in case of deleted items) until current -// item exists, or until iterator becomes invalid. -// Returns whether the iterator is valid. -func (iter *mergedIterator) skipUntilExistsOrInvalid() bool { - for { - // If parent is invalid, fast-forward cache. - if !iter.parent.Valid() { - iter.skipCacheDeletes(nil) - return iter.cache.Valid() - } - // Parent is valid. - - if !iter.cache.Valid() { - return true - } - // Parent is valid, cache is valid. - - // Compare parent and cache. - keyP := iter.parent.Key() - keyC := iter.cache.Key() - - switch iter.compare(keyP, keyC) { - case -1: // parent < cache. - return true - - case 0: // parent == cache. - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.parent.Next() - iter.cache.Next() - - continue - } - // Cache is not a delete. - - return true // cache exists. - case 1: // cache < parent - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.skipCacheDeletes(keyP) - continue - } - // Cache is not a delete. - return true // cache exists. - } - } -} diff --git a/server/v2/stf/branch/store.go b/server/v2/stf/branch/store.go deleted file mode 100644 index f0d6d0b3a1..0000000000 --- a/server/v2/stf/branch/store.go +++ /dev/null @@ -1,134 +0,0 @@ -package branch - -import ( - "errors" - - "cosmossdk.io/core/store" -) - -var _ store.Writer = (*Store[store.Reader])(nil) - -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store[T store.Reader] struct { - changeSet changeSet // always ascending sorted - parent T -} - -// NewStore creates a new Store object -func NewStore[T store.Reader](parent T) Store[T] { - return Store[T]{ - changeSet: newChangeSet(), - parent: parent, - } -} - -// Get implements types.KVStore. -func (s Store[T]) Get(key []byte) (value []byte, err error) { - value, found := s.changeSet.get(key) - if found { - return - } - return s.parent.Get(key) -} - -// Set implements types.KVStore. -func (s Store[T]) Set(key, value []byte) error { - if value == nil { - return errors.New("cannot set a nil value") - } - - s.changeSet.set(key, value) - return nil -} - -// Has implements types.KVStore. -func (s Store[T]) Has(key []byte) (bool, error) { - tmpValue, found := s.changeSet.get(key) - if found { - return tmpValue != nil, nil - } - return s.parent.Has(key) -} - -// Delete implements types.KVStore. -func (s Store[T]) Delete(key []byte) error { - s.changeSet.delete(key) - return nil -} - -// ---------------------------------------- -// Iteration - -// Iterator implements types.KVStore. -func (s Store[T]) Iterator(start, end []byte) (store.Iterator, error) { - return s.iterator(start, end, true) -} - -// ReverseIterator implements types.KVStore. 
-func (s Store[T]) ReverseIterator(start, end []byte) (store.Iterator, error) {
-	return s.iterator(start, end, false)
-}
-
-func (s Store[T]) iterator(start, end []byte, ascending bool) (store.Iterator, error) {
-	var (
-		err           error
-		parent, cache store.Iterator
-	)
-
-	if ascending {
-		parent, err = s.parent.Iterator(start, end)
-		if err != nil {
-			return nil, err
-		}
-		cache, err = s.changeSet.iterator(start, end)
-		if err != nil {
-			return nil, err
-		}
-		return mergeIterators(parent, cache, ascending), nil
-	} else {
-		parent, err = s.parent.ReverseIterator(start, end)
-		if err != nil {
-			return nil, err
-		}
-		cache, err = s.changeSet.reverseIterator(start, end)
-		if err != nil {
-			return nil, err
-		}
-		return mergeIterators(parent, cache, ascending), nil
-	}
-}
-
-func (s Store[T]) ApplyChangeSets(changes []store.KVPair) error {
-	for _, c := range changes {
-		if c.Remove {
-			err := s.Delete(c.Key)
-			if err != nil {
-				return err
-			}
-		} else {
-			err := s.Set(c.Key, c.Value)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (s Store[T]) ChangeSets() (cs []store.KVPair, err error) {
-	iter, err := s.changeSet.iterator(nil, nil)
-	if err != nil {
-		return nil, err
-	}
-	defer iter.Close()
-
-	for ; iter.Valid(); iter.Next() {
-		k, v := iter.Key(), iter.Value()
-		cs = append(cs, store.KVPair{
-			Key:    k,
-			Value:  v,
-			Remove: v == nil, // maybe we can optimistically compute size.
-		})
-	}
-	return cs, nil
-}
diff --git a/server/v2/stf/branch/writer_map.go b/server/v2/stf/branch/writer_map.go
deleted file mode 100644
index 244b51b3e7..0000000000
--- a/server/v2/stf/branch/writer_map.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package branch
-
-import (
-	"fmt"
-	"unsafe"
-
-	"cosmossdk.io/core/store"
-)
-
-func NewWriterMap(
-	state store.ReaderMap,
-	branch func(readonlyState store.Reader) store.Writer,
-) store.WriterMap {
-	return WriterMap{
-		state:               state,
-		branchedWriterState: make(map[string]store.Writer),
-		branch:              branch,
-	}
-}
-
-// WriterMap implements a branched version of the store.WriterMap.
-// After the first time the actor's branched Store is created, it is
-// memoized in the WriterMap.
-type WriterMap struct {
-	state               store.ReaderMap
-	branchedWriterState map[string]store.Writer
-	branch              func(state store.Reader) store.Writer
-}
-
-func (b WriterMap) GetReader(actor []byte) (store.Reader, error) {
-	return b.GetWriter(actor)
-}
-
-func (b WriterMap) GetWriter(actor []byte) (store.Writer, error) {
-	// Simplify and optimize state retrieval
-	if actorState, ok := b.branchedWriterState[unsafeString(actor)]; ok {
-		return actorState, nil
-	} else if writerState, err := b.state.GetReader(actor); err != nil {
-		return nil, err
-	} else {
-		actorState = b.branch(writerState)
-		b.branchedWriterState[string(actor)] = actorState
-		return actorState, nil
-	}
-}
-
-func (b WriterMap) ApplyStateChanges(stateChanges []store.StateChanges) error {
-	for _, sc := range stateChanges {
-		if err := b.applyStateChange(sc); err != nil {
-			return fmt.Errorf("unable to apply state change for actor %X: %w", sc.Actor, err)
-		}
-	}
-	return nil
-}
-
-// GetStateChanges returns the state changes for all actors in the WriterMap.
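The pairing of `GetStateChanges` with `ApplyStateChanges` is what allows a branched `WriterMap` to be merged back into its parent. A hypothetical sketch of that round trip (the `commit` helper is illustrative only, assuming both methods sit on the core `store.WriterMap` interface):

```go
package sketch

import "cosmossdk.io/core/store"

// commit drains the per-actor change sets from a branched WriterMap
// and replays them onto the parent state.
func commit(parent, branched store.WriterMap) error {
	changes, err := branched.GetStateChanges() // one StateChanges entry per actor
	if err != nil {
		return err
	}
	return parent.ApplyStateChanges(changes) // replay the change sets on the parent
}
```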
-func (b WriterMap) GetStateChanges() ([]store.StateChanges, error) { - sc := make([]store.StateChanges, 0, len(b.branchedWriterState)) - for acc, w := range b.branchedWriterState { - accBytes := []byte(acc) - kvChanges, err := w.ChangeSets() - if err != nil { - return nil, fmt.Errorf("unable to get actor writer changes %x: %w", accBytes, err) - } - sc = append(sc, store.StateChanges{ - Actor: accBytes, - StateChanges: kvChanges, - }) - } - - return sc, nil -} - -func (b WriterMap) applyStateChange(sc store.StateChanges) error { - writableState, err := b.GetWriter(sc.Actor) - if err != nil { - return err - } - return writableState.ApplyChangeSets(sc.StateChanges) -} - -func unsafeString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } diff --git a/server/v2/stf/core_branch_service.go b/server/v2/stf/core_branch_service.go deleted file mode 100644 index 365d73d532..0000000000 --- a/server/v2/stf/core_branch_service.go +++ /dev/null @@ -1,75 +0,0 @@ -package stf - -import ( - "context" - - "cosmossdk.io/core/branch" - "cosmossdk.io/core/store" -) - -type branchFn func(state store.ReaderMap) store.WriterMap - -var _ branch.Service = (*BranchService)(nil) - -type BranchService struct{} - -func (bs BranchService) Execute(ctx context.Context, f func(ctx context.Context) error) error { - return bs.execute(ctx.(*executionContext), f) -} - -func (bs BranchService) ExecuteWithGasLimit( - ctx context.Context, - gasLimit uint64, - f func(ctx context.Context) error, -) (gasUsed uint64, err error) { - stfCtx := ctx.(*executionContext) - - originalGasMeter := stfCtx.meter - - stfCtx.setGasLimit(gasLimit) - - // execute branched, with predefined gas limit. - err = bs.execute(stfCtx, f) - // restore original context - gasUsed = stfCtx.meter.Limit() - stfCtx.meter.Remaining() - _ = originalGasMeter.Consume(gasUsed, "execute-with-gas-limit") - stfCtx.setGasLimit(originalGasMeter.Limit() - originalGasMeter.Remaining()) - - return gasUsed, err -} - -func (bs BranchService) execute(ctx *executionContext, f func(ctx context.Context) error) error { - branchedState := ctx.branchFn(ctx.unmeteredState) - meteredBranchedState := ctx.makeGasMeteredStore(ctx.meter, branchedState) - - branchedCtx := &executionContext{ - Context: ctx.Context, - unmeteredState: branchedState, - state: meteredBranchedState, - meter: ctx.meter, - events: nil, - sender: ctx.sender, - headerInfo: ctx.headerInfo, - execMode: ctx.execMode, - branchFn: ctx.branchFn, - makeGasMeter: ctx.makeGasMeter, - makeGasMeteredStore: ctx.makeGasMeteredStore, - } - - err := f(branchedCtx) - if err != nil { - return err - } - - // apply state changes to original state - if len(branchedCtx.events) != 0 { - ctx.events = append(ctx.events, branchedCtx.events...) 
- } - - err = applyStateChanges(ctx.state, branchedCtx.unmeteredState) - if err != nil { - return err - } - - return nil -} diff --git a/server/v2/stf/core_branch_service_test.go b/server/v2/stf/core_branch_service_test.go deleted file mode 100644 index 960563faba..0000000000 --- a/server/v2/stf/core_branch_service_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package stf - -import ( - "context" - "fmt" - "testing" - - gogotypes "github.com/cosmos/gogoproto/types" - "github.com/stretchr/testify/require" - - appmodulev2 "cosmossdk.io/core/appmodule/v2" - "cosmossdk.io/server/v2/stf/branch" - "cosmossdk.io/server/v2/stf/gas" - "cosmossdk.io/server/v2/stf/mock" -) - -func TestBranchService(t *testing.T) { - s := &STF[mock.Tx]{ - doPreBlock: func(ctx context.Context, txs []mock.Tx) error { return nil }, - doBeginBlock: func(ctx context.Context) error { - kvSet(t, ctx, "begin-block") - return nil - }, - doEndBlock: func(ctx context.Context) error { - kvSet(t, ctx, "end-block") - return nil - }, - doValidatorUpdate: func(ctx context.Context) ([]appmodulev2.ValidatorUpdate, error) { return nil, nil }, - doTxValidation: func(ctx context.Context, tx mock.Tx) error { - kvSet(t, ctx, "validate") - return nil - }, - postTxExec: func(ctx context.Context, tx mock.Tx, success bool) error { - kvSet(t, ctx, "post-tx-exec") - return nil - }, - branchFn: branch.DefaultNewWriterMap, - makeGasMeter: gas.DefaultGasMeter, - makeGasMeteredState: gas.DefaultWrapWithGasMeter, - } - addMsgHandlerToSTF(t, s, func(ctx context.Context, msg *gogotypes.BoolValue) (*gogotypes.BoolValue, error) { - kvSet(t, ctx, "exec") - return nil, nil - }) - - makeContext := func() *executionContext { - state := mock.DB() - writableState := s.branchFn(state) - ctx := s.makeContext(context.Background(), []byte("cookies"), writableState, 0) - ctx.setGasLimit(1000000) - return ctx - } - - branchService := BranchService{} - - // TODO: add events check + gas limit precision test - - t.Run("ok", func(t *testing.T) { - stfCtx := makeContext() - gasUsed, err := branchService.ExecuteWithGasLimit(stfCtx, 10000, func(ctx context.Context) error { - kvSet(t, ctx, "cookies") - return nil - }) - require.NoError(t, err) - require.NotZero(t, gasUsed) - stateHas(t, stfCtx.state, "cookies") - }) - - t.Run("fail - reverts state", func(t *testing.T) { - stfCtx := makeContext() - gasUsed, err := branchService.ExecuteWithGasLimit(stfCtx, 10000, func(ctx context.Context) error { - kvSet(t, ctx, "cookies") - return fmt.Errorf("fail") - }) - require.Error(t, err) - require.NotZero(t, gasUsed) - stateNotHas(t, stfCtx.state, "cookies") - }) - - t.Run("fail - out of gas", func(t *testing.T) { - stfCtx := makeContext() - - gasUsed, err := branchService.ExecuteWithGasLimit(stfCtx, 4000, func(ctx context.Context) error { - state, _ := ctx.(*executionContext).state.GetWriter(actorName) - _ = state.Set([]byte("not out of gas"), []byte{}) - return state.Set([]byte("out of gas"), []byte{}) - }) - require.Error(t, err) - require.NotZero(t, gasUsed) - stateNotHas(t, stfCtx.state, "cookies") - require.Equal(t, uint64(1000), stfCtx.meter.Limit()-stfCtx.meter.Remaining()) - }) -} diff --git a/server/v2/stf/core_event_service.go b/server/v2/stf/core_event_service.go deleted file mode 100644 index 5bd94c6ea7..0000000000 --- a/server/v2/stf/core_event_service.go +++ /dev/null @@ -1,86 +0,0 @@ -package stf - -import ( - "context" - "encoding/json" - "slices" - - gogoproto "github.com/cosmos/gogoproto/proto" - "golang.org/x/exp/maps" - - "cosmossdk.io/core/event" -) - -func 
NewEventService() event.Service {
-	return eventService{}
-}
-
-type eventService struct{}
-
-// EventManager implements event.Service.
-func (eventService) EventManager(ctx context.Context) event.Manager {
-	return &eventManager{ctx.(*executionContext)}
-}
-
-var _ event.Manager = (*eventManager)(nil)
-
-type eventManager struct {
-	executionContext *executionContext
-}
-
-// Emit emits a typed event that is defined in the protobuf file.
-// In the future these events will be added to consensus.
-func (em *eventManager) Emit(tev gogoproto.Message) error {
-	res, err := TypedEventToEvent(tev)
-	if err != nil {
-		return err
-	}
-
-	em.executionContext.events = append(em.executionContext.events, res)
-	return nil
-}
-
-// EmitKV emits a key-value pair event.
-func (em *eventManager) EmitKV(eventType string, attrs ...event.Attribute) error {
-	em.executionContext.events = append(em.executionContext.events, event.NewEvent(eventType, attrs...))
-	return nil
-}
-
-// EmitNonConsensus emits a typed event that is defined in the protobuf file.
-// These events will not be added to consensus.
-func (em *eventManager) EmitNonConsensus(event gogoproto.Message) error {
-	return em.Emit(event)
-}
-
-// TypedEventToEvent takes a typed event and converts it to an Event object.
-func TypedEventToEvent(tev gogoproto.Message) (event.Event, error) {
-	evtType := gogoproto.MessageName(tev)
-	evtJSON, err := gogoproto.Marshal(tev)
-	if err != nil {
-		return event.Event{}, err
-	}
-
-	var attrMap map[string]json.RawMessage
-	err = json.Unmarshal(evtJSON, &attrMap)
-	if err != nil {
-		return event.Event{}, err
-	}
-
-	// sort the keys to ensure the order is always the same
-	keys := maps.Keys(attrMap)
-	slices.Sort(keys)
-
-	attrs := make([]event.Attribute, 0, len(attrMap))
-	for _, k := range keys {
-		v := attrMap[k]
-		attrs = append(attrs, event.Attribute{
-			Key:   k,
-			Value: string(v),
-		})
-	}
-
-	return event.Event{
-		Type:       evtType,
-		Attributes: attrs,
-	}, nil
-}
diff --git a/server/v2/stf/core_gas_service.go b/server/v2/stf/core_gas_service.go
deleted file mode 100644
index 7378d12657..0000000000
--- a/server/v2/stf/core_gas_service.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package stf
-
-import (
-	"context"
-
-	"cosmossdk.io/core/gas"
-	"cosmossdk.io/core/store"
-)
-
-type (
-	// makeGasMeterFn is a function type that takes a gas limit as input and returns a gas.Meter.
-	// It is used to measure and limit the amount of gas consumed during the execution of a function.
-	makeGasMeterFn func(gasLimit uint64) gas.Meter
-
-	// makeGasMeteredStateFn is a function type that wraps a gas meter and a store writer map.
-	makeGasMeteredStateFn func(meter gas.Meter, store store.WriterMap) store.WriterMap
-)
-
-// NewGasMeterService creates a new instance of the gas meter service.
-func NewGasMeterService() gas.Service {
-	return gasService{}
-}
-
-type gasService struct{}
-
-// GasConfig implements gas.Service.
-func (g gasService) GasConfig(ctx context.Context) gas.GasConfig {
-	panic("unimplemented")
-}
-
-func (g gasService) GasMeter(ctx context.Context) gas.Meter {
-	return ctx.(*executionContext).meter
-}
-
-func (g gasService) BlockGasMeter(ctx context.Context) gas.Meter {
-	panic("stf has no block gas meter")
-}
-
-func (g gasService) WithGasMeter(ctx context.Context, meter gas.Meter) context.Context {
-	panic("unimplemented")
-}
-
-func (g gasService) WithBlockGasMeter(ctx context.Context, meter gas.Meter) context.Context {
-	panic("unimplemented")
-}
diff --git a/server/v2/stf/core_header_service.go b/server/v2/stf/core_header_service.go
deleted file mode 100644
index 4448627828..0000000000
--- a/server/v2/stf/core_header_service.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package stf
-
-import (
-	"context"
-
-	"cosmossdk.io/core/header"
-	"cosmossdk.io/core/store"
-)
-
-var _ header.Service = (*HeaderService)(nil)
-
-type HeaderService struct{}
-
-func (h HeaderService) HeaderInfo(ctx context.Context) header.Info {
-	return ctx.(*executionContext).headerInfo
-}
-
-const headerInfoPrefix = 0x37
-
-// setHeaderInfo sets the header info in the state to be used by queries in the future.
-func (s STF[T]) setHeaderInfo(state store.WriterMap, headerInfo header.Info) error {
-	// TODO storing header info is too low level here, stf should be stateless.
-	// We should have a keeper that does this.
-	runtimeStore, err := state.GetWriter(Identity)
-	if err != nil {
-		return err
-	}
-	bz, err := headerInfo.Bytes()
-	if err != nil {
-		return err
-	}
-	err = runtimeStore.Set([]byte{headerInfoPrefix}, bz)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// getHeaderInfo gets the header info from the state. It should only be used for queries.
-func (s STF[T]) getHeaderInfo(state store.WriterMap) (i header.Info, err error) {
-	runtimeStore, err := state.GetWriter(Identity)
-	if err != nil {
-		return header.Info{}, err
-	}
-	v, err := runtimeStore.Get([]byte{headerInfoPrefix})
-	if err != nil {
-		return header.Info{}, err
-	}
-	if v == nil {
-		return header.Info{}, nil
-	}
-
-	err = i.FromBytes(v)
-	return i, err
-}
diff --git a/server/v2/stf/core_router_service.go b/server/v2/stf/core_router_service.go
deleted file mode 100644
index 9363632271..0000000000
--- a/server/v2/stf/core_router_service.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package stf
-
-import (
-	"context"
-
-	gogoproto "github.com/cosmos/gogoproto/proto"
-
-	"cosmossdk.io/core/router"
-	"cosmossdk.io/core/transaction"
-)
-
-// NewMsgRouterService returns an implementation of router.Service.
-func NewMsgRouterService(identity transaction.Identity) router.Service {
-	return msgRouterService{identity: identity}
-}
-
-var _ router.Service = (*msgRouterService)(nil)
-
-type msgRouterService struct {
-	// TODO(tip): the identity sits here for the purpose of disallowing modules to impersonate others (sudo).
-	// right now this is not used, but it serves as a reminder of something that we should eventually
-	// look into.
-	identity []byte
-}
-
-// CanInvoke returns an error if the given message cannot be invoked.
-func (m msgRouterService) CanInvoke(ctx context.Context, typeURL string) error {
-	return ctx.(*executionContext).msgRouter.CanInvoke(ctx, typeURL)
-}
-
-// InvokeTyped executes a message and fills in a response.
-// The response must be known and passed as a parameter.
-// Use InvokeUntyped if the response type is not known.
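-// The concrete type of resp must match the handler's response type, since the result is copied into it via reflection.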
-func (m msgRouterService) InvokeTyped(ctx context.Context, msg, resp gogoproto.Message) error {
-	return ctx.(*executionContext).msgRouter.InvokeTyped(ctx, msg, resp)
-}
-
-// InvokeUntyped executes a message and returns a response.
-func (m msgRouterService) InvokeUntyped(ctx context.Context, msg gogoproto.Message) (gogoproto.Message, error) {
-	return ctx.(*executionContext).msgRouter.InvokeUntyped(ctx, msg)
-}
-
-// NewQueryRouterService returns an implementation of router.Service.
-func NewQueryRouterService() router.Service {
-	return queryRouterService{}
-}
-
-var _ router.Service = (*queryRouterService)(nil)
-
-type queryRouterService struct{}
-
-// CanInvoke returns an error if the given request cannot be invoked.
-func (m queryRouterService) CanInvoke(ctx context.Context, typeURL string) error {
-	return ctx.(*executionContext).queryRouter.CanInvoke(ctx, typeURL)
-}
-
-// InvokeTyped executes a message and fills in a response.
-// The response must be known and passed as a parameter.
-// Use InvokeUntyped if the response type is not known.
-func (m queryRouterService) InvokeTyped(
-	ctx context.Context,
-	req, resp gogoproto.Message,
-) error {
-	return ctx.(*executionContext).queryRouter.InvokeTyped(ctx, req, resp)
-}
-
-// InvokeUntyped executes a message and returns a response.
-func (m queryRouterService) InvokeUntyped(
-	ctx context.Context,
-	req gogoproto.Message,
-) (gogoproto.Message, error) {
-	return ctx.(*executionContext).queryRouter.InvokeUntyped(ctx, req)
-}
diff --git a/server/v2/stf/core_store_service.go b/server/v2/stf/core_store_service.go
deleted file mode 100644
index d912f92771..0000000000
--- a/server/v2/stf/core_store_service.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package stf
-
-import (
-	"context"
-
-	"cosmossdk.io/core/store"
-)
-
-var _ store.KVStoreService = (*storeService)(nil)
-
-func NewKVStoreService(address []byte) store.KVStoreService {
-	return storeService{actor: address}
-}
-
-func NewMemoryStoreService(address []byte) store.MemoryStoreService {
-	return storeService{actor: address}
-}
-
-type storeService struct {
-	actor []byte
-}
-
-func (s storeService) OpenKVStore(ctx context.Context) store.KVStore {
-	state, err := ctx.(*executionContext).state.GetWriter(s.actor)
-	if err != nil {
-		panic(err)
-	}
-	return state
-}
-
-func (s storeService) OpenMemoryStore(ctx context.Context) store.KVStore {
-	return s.OpenKVStore(ctx)
-}
diff --git a/server/v2/stf/export_test.go b/server/v2/stf/export_test.go
deleted file mode 100644
index b84148abdd..0000000000
--- a/server/v2/stf/export_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package stf
-
-import (
-	"context"
-)
-
-func GetExecutionContext(ctx context.Context) *executionContext {
-	executionCtx, ok := ctx.(*executionContext)
-	if !ok {
-		return nil
-	}
-	return executionCtx
-}
diff --git a/server/v2/stf/gas/defaults.go b/server/v2/stf/gas/defaults.go
deleted file mode 100644
index 8906e31da6..0000000000
--- a/server/v2/stf/gas/defaults.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package gas
-
-import (
-	coregas "cosmossdk.io/core/gas"
-	"cosmossdk.io/core/store"
-)
-
-// DefaultWrapWithGasMeter defines the default wrap-with-gas-meter function in stf. If the
-// meter's limit is coregas.NoGasLimit, a fast path is taken and the store.WriterMap is
-// returned unmetered.
-func DefaultWrapWithGasMeter(meter coregas.Meter, state store.WriterMap) store.WriterMap {
-	if meter.Limit() == coregas.NoGasLimit {
-		return state
-	}
-	return NewMeteredWriterMap(DefaultConfig, meter, state)
-}
-
-// DefaultGasMeter returns the default gas meter.
In case it is coregas.NoGasLimit a NoOpMeter is returned. -func DefaultGasMeter(gasLimit uint64) coregas.Meter { - if gasLimit == coregas.NoGasLimit { - return NoOpMeter{} - } - return NewMeter(gasLimit) -} - -var DefaultConfig = StoreConfig{ - HasCost: 1000, - DeleteCostFlat: 1000, - ReadCostFlat: 1000, - ReadCostPerByte: 3, - WriteCostFlat: 2000, - WriteCostPerByte: 30, - IterNextCostFlat: 30, -} - -type NoOpMeter struct{} - -func (n NoOpMeter) Consumed() coregas.Gas { return 0 } - -func (n NoOpMeter) Limit() coregas.Gas { return coregas.NoGasLimit } - -func (n NoOpMeter) Consume(_ coregas.Gas, _ string) error { return nil } - -func (n NoOpMeter) Refund(_ coregas.Gas, _ string) error { return nil } - -func (n NoOpMeter) Remaining() coregas.Gas { return coregas.NoGasLimit } diff --git a/server/v2/stf/gas/meter.go b/server/v2/stf/gas/meter.go deleted file mode 100644 index 9f830260b5..0000000000 --- a/server/v2/stf/gas/meter.go +++ /dev/null @@ -1,57 +0,0 @@ -package gas - -import ( - "cosmossdk.io/core/gas" -) - -var _ gas.Meter = (*Meter)(nil) - -type Meter struct { - limit uint64 - consumed uint64 -} - -// NewMeter creates a new gas meter with the given gas limit. -// The gas meter keeps track of the gas consumed during execution. -func NewMeter(gasLimit uint64) gas.Meter { - return &Meter{ - limit: gasLimit, - consumed: 0, - } -} - -// Consumed returns the amount of gas consumed by the meter. -func (m *Meter) Consumed() gas.Gas { - return m.consumed -} - -// Limit returns the maximum gas limit allowed for the meter. -func (m *Meter) Limit() gas.Gas { - return m.limit -} - -// Consume consumes the specified amount of gas from the meter. -// It returns an error if the requested gas exceeds the remaining gas limit. -func (m *Meter) Consume(requested gas.Gas, _ string) error { - remaining := m.limit - m.consumed - if requested > remaining { - return gas.ErrOutOfGas - } - m.consumed += requested - return nil -} - -// Refund refunds the specified amount of gas. -// If the amount is less than the consumed gas, it subtracts the amount from the consumed gas. -// It returns nil error. -func (m *Meter) Refund(amount gas.Gas, _ string) error { - if amount < m.consumed { - m.consumed -= amount - } - return nil -} - -// Remaining returns the remaining gas limit. -func (m *Meter) Remaining() gas.Gas { - return m.limit - m.consumed -} diff --git a/server/v2/stf/gas/store.go b/server/v2/stf/gas/store.go deleted file mode 100644 index 8ea15f12dd..0000000000 --- a/server/v2/stf/gas/store.go +++ /dev/null @@ -1,182 +0,0 @@ -package gas - -import ( - "cosmossdk.io/core/gas" - "cosmossdk.io/core/store" -) - -// Gas consumption descriptors. 
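-// Each descriptor is passed to the gas meter on Consume calls to describe what the gas pays for.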
-const ( - DescIterNextCostFlat = "IterNextFlat" - DescValuePerByte = "ValuePerByte" - DescWritePerByte = "WritePerByte" - DescReadPerByte = "ReadPerByte" - DescWriteCostFlat = "WriteFlat" - DescReadCostFlat = "ReadFlat" - DescHas = "Has" - DescDelete = "Delete" -) - -type StoreConfig struct { - ReadCostFlat, ReadCostPerByte, HasCost gas.Gas - WriteCostFlat, WriteCostPerByte, DeleteCostFlat gas.Gas - IterNextCostFlat gas.Gas -} - -type Store struct { - parent store.Writer - gasMeter gas.Meter - gasConfig StoreConfig -} - -func NewStore(gc StoreConfig, meter gas.Meter, parent store.Writer) *Store { - return &Store{ - parent: parent, - gasMeter: meter, - gasConfig: gc, - } -} - -func (s *Store) Get(key []byte) ([]byte, error) { - if err := s.gasMeter.Consume(s.gasConfig.ReadCostFlat, DescReadCostFlat); err != nil { - return nil, err - } - - value, err := s.parent.Get(key) - if err := s.gasMeter.Consume(s.gasConfig.ReadCostPerByte*gas.Gas(len(key)), DescReadPerByte); err != nil { - return nil, err - } - if err := s.gasMeter.Consume(s.gasConfig.ReadCostPerByte*gas.Gas(len(value)), DescReadPerByte); err != nil { - return nil, err - } - - return value, err -} - -func (s *Store) Has(key []byte) (bool, error) { - if err := s.gasMeter.Consume(s.gasConfig.HasCost, DescHas); err != nil { - return false, err - } - - return s.parent.Has(key) -} - -func (s *Store) Set(key, value []byte) error { - if err := s.gasMeter.Consume(s.gasConfig.WriteCostFlat, DescWriteCostFlat); err != nil { - return err - } - if err := s.gasMeter.Consume(s.gasConfig.WriteCostPerByte*gas.Gas(len(key)), DescWritePerByte); err != nil { - return err - } - if err := s.gasMeter.Consume(s.gasConfig.WriteCostPerByte*gas.Gas(len(value)), DescWritePerByte); err != nil { - return err - } - - return s.parent.Set(key, value) -} - -func (s *Store) Delete(key []byte) error { - if err := s.gasMeter.Consume(s.gasConfig.DeleteCostFlat, DescDelete); err != nil { - return err - } - - return s.parent.Delete(key) -} - -func (s *Store) ApplyChangeSets(changes []store.KVPair) error { - return s.parent.ApplyChangeSets(changes) -} - -func (s *Store) ChangeSets() ([]store.KVPair, error) { - return s.parent.ChangeSets() -} - -func (s *Store) Iterator(start, end []byte) (store.Iterator, error) { - itr, err := s.parent.Iterator(start, end) - if err != nil { - return nil, err - } - - return newIterator(itr, s.gasMeter, s.gasConfig), nil -} - -func (s *Store) ReverseIterator(start, end []byte) (store.Iterator, error) { - itr, err := s.parent.ReverseIterator(start, end) - if err != nil { - return nil, err - } - - return newIterator(itr, s.gasMeter, s.gasConfig), nil -} - -var _ store.Iterator = (*iterator)(nil) - -type iterator struct { - gasMeter gas.Meter - gasConfig StoreConfig - parent store.Iterator -} - -func newIterator(parent store.Iterator, gm gas.Meter, gc StoreConfig) store.Iterator { - return &iterator{ - parent: parent, - gasConfig: gc, - gasMeter: gm, - } -} - -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.parent.Domain() -} - -func (itr *iterator) Valid() bool { - return itr.parent.Valid() -} - -func (itr *iterator) Key() []byte { - return itr.parent.Key() -} - -func (itr *iterator) Value() []byte { - return itr.parent.Value() -} - -func (itr *iterator) Next() { - if err := itr.consumeGasSeek(); err != nil { - // closing the iterator prematurely to prevent further execution - itr.parent.Close() - return - } - itr.parent.Next() -} - -func (itr *iterator) Close() error { - return itr.parent.Close() -} - -func (itr *iterator) 
Error() error { - return itr.parent.Error() -} - -// consumeGasSeek consumes a fixed amount of gas for each iteration step and a -// variable gas cost based on the current key and value's length. This is called -// prior to the iterator's Next() call. -func (itr *iterator) consumeGasSeek() error { - if itr.Valid() { - key := itr.Key() - value := itr.Value() - - if err := itr.gasMeter.Consume(itr.gasConfig.ReadCostPerByte*gas.Gas(len(key)), DescValuePerByte); err != nil { - return err - } - if err := itr.gasMeter.Consume(itr.gasConfig.ReadCostPerByte*gas.Gas(len(value)), DescValuePerByte); err != nil { - return err - } - } - - if err := itr.gasMeter.Consume(itr.gasConfig.IterNextCostFlat, DescIterNextCostFlat); err != nil { - return err - } - - return nil -} diff --git a/server/v2/stf/gas/writer_map.go b/server/v2/stf/gas/writer_map.go deleted file mode 100644 index cd5fa406d1..0000000000 --- a/server/v2/stf/gas/writer_map.go +++ /dev/null @@ -1,56 +0,0 @@ -package gas - -import ( - "unsafe" - - "cosmossdk.io/core/gas" - "cosmossdk.io/core/store" -) - -func NewMeteredWriterMap(conf StoreConfig, meter gas.Meter, state store.WriterMap) MeteredWriterMap { - return MeteredWriterMap{ - config: conf, - meter: meter, - state: state, - cacheMeteredStores: make(map[string]*Store), - } -} - -// MeteredWriterMap wraps store.Writer and returns a gas metered -// version of it. Since the gas meter is shared across different -// writers, the metered writers are memoized. -type MeteredWriterMap struct { - config StoreConfig - meter gas.Meter - state store.WriterMap - cacheMeteredStores map[string]*Store -} - -func (m MeteredWriterMap) GetReader(actor []byte) (store.Reader, error) { return m.GetWriter(actor) } - -func (m MeteredWriterMap) GetWriter(actor []byte) (store.Writer, error) { - cached, ok := m.cacheMeteredStores[unsafeString(actor)] - if ok { - return cached, nil - } - - state, err := m.state.GetWriter(actor) - if err != nil { - return nil, err - } - - meteredState := NewStore(m.config, m.meter, state) - m.cacheMeteredStores[string(actor)] = meteredState - - return meteredState, nil -} - -func (m MeteredWriterMap) ApplyStateChanges(stateChanges []store.StateChanges) error { - return m.state.ApplyStateChanges(stateChanges) -} - -func (m MeteredWriterMap) GetStateChanges() ([]store.StateChanges, error) { - return m.state.GetStateChanges() -} - -func unsafeString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } diff --git a/server/v2/stf/go.mod b/server/v2/stf/go.mod deleted file mode 100644 index 59c6888ecb..0000000000 --- a/server/v2/stf/go.mod +++ /dev/null @@ -1,21 +0,0 @@ -module cosmossdk.io/server/v2/stf - -go 1.21 - -replace cosmossdk.io/core => ../../../core - -require ( - cosmossdk.io/core v0.11.0 - github.com/cosmos/gogoproto v1.5.0 - github.com/stretchr/testify v1.9.0 - github.com/tidwall/btree v1.7.0 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d -) - -require ( - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/server/v2/stf/go.sum b/server/v2/stf/go.sum deleted file mode 100644 index fe0851fd94..0000000000 --- a/server/v2/stf/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= -github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= 
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= -github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/server/v2/stf/internal/transaction.go b/server/v2/stf/internal/transaction.go deleted file mode 100644 index 3499472b6f..0000000000 --- a/server/v2/stf/internal/transaction.go +++ /dev/null @@ -1,18 +0,0 @@ -package internal - -import "cosmossdk.io/core/transaction" - -// All possible transaction execution modes. -// For backwards compatibility and easier casting, the ExecMode values must be: -// 1) set equivalent to cosmos/cosmos-sdk/types package. -// 2) a superset of core/transaction/service.go:ExecMode with same numeric values. -const ( - ExecModeCheck transaction.ExecMode = iota - ExecModeReCheck - ExecModeSimulate - ExecModePrepareProposal - ExecModeProcessProposal - ExecModeVoteExtension - ExecModeVerifyVoteExtension - ExecModeFinalize -) diff --git a/server/v2/stf/mock/db.go b/server/v2/stf/mock/db.go deleted file mode 100644 index fbc405c2b1..0000000000 --- a/server/v2/stf/mock/db.go +++ /dev/null @@ -1,38 +0,0 @@ -package mock - -import ( - "cosmossdk.io/core/store" -) - -func DB() store.ReaderMap { - return actorState{kv: map[string][]byte{}} -} - -type actorState struct { - kv map[string][]byte -} - -func (m actorState) GetReader(address []byte) (store.Reader, error) { - return memState{address, m.kv}, nil -} - -type memState struct { - address []byte - kv map[string][]byte -} - -func (m memState) Has(key []byte) (bool, error) { - v, err := m.Get(key) - return v != nil, err -} - -func (m memState) Get(bytes []byte) ([]byte, error) { - key := append(m.address, bytes...) 
- return m.kv[string(key)], nil -} - -func (m memState) Iterator(start, end []byte) (store.Iterator, error) { panic("implement me") } - -func (m memState) ReverseIterator(start, end []byte) (store.Iterator, error) { - panic("implement me") -} diff --git a/server/v2/stf/mock/tx.go b/server/v2/stf/mock/tx.go deleted file mode 100644 index 1f56edb202..0000000000 --- a/server/v2/stf/mock/tx.go +++ /dev/null @@ -1,108 +0,0 @@ -package mock - -import ( - "crypto/sha256" - "encoding/json" - "errors" - - gogoproto "github.com/cosmos/gogoproto/types" - - "cosmossdk.io/core/transaction" -) - -var _ transaction.Tx = Tx{} - -type Tx struct { - Sender []byte - Msg transaction.Msg - GasLimit uint64 -} - -func (t Tx) Hash() [32]byte { - return sha256.Sum256(t.Bytes()) -} - -func (t Tx) GetMessages() ([]transaction.Msg, error) { - if t.Msg == nil { - return nil, errors.New("messages not available or are nil") - } - return []transaction.Msg{t.Msg}, nil -} - -func (t Tx) GetSenders() ([]transaction.Identity, error) { - if t.Sender == nil { - return nil, errors.New("senders not available or are nil") - } - return []transaction.Identity{t.Sender}, nil -} - -func (t Tx) GetGasLimit() (uint64, error) { - return t.GasLimit, nil -} - -type encodedTx struct { - Sender []byte `json:"sender"` - Msg *gogoproto.Any `json:"message"` - GasLimit uint64 `json:"gas_limit"` -} - -func (t Tx) Bytes() []byte { - v2Msg := t.Msg - msg, err := gogoproto.MarshalAny(v2Msg) - if err != nil { - panic(err) - } - tx, err := json.Marshal(encodedTx{ - Sender: t.Sender, - Msg: msg, - GasLimit: t.GasLimit, - }) - if err != nil { - panic(err) - } - return tx -} - -func (t *Tx) Decode(b []byte) { - rawTx := new(encodedTx) - err := json.Unmarshal(b, rawTx) - if err != nil { - panic(err) - } - var msg transaction.Msg - if err := gogoproto.UnmarshalAny(rawTx.Msg, msg); err != nil { - panic(err) - } - t.Msg = msg - t.Sender = rawTx.Sender - t.GasLimit = rawTx.GasLimit -} - -func (t *Tx) DecodeJSON(b []byte) { - rawTx := new(encodedTx) - err := json.Unmarshal(b, rawTx) - if err != nil { - panic(err) - } - var msg transaction.Msg - if err := gogoproto.UnmarshalAny(rawTx.Msg, msg); err != nil { - panic(err) - } - t.Msg = msg - t.Sender = rawTx.Sender - t.GasLimit = rawTx.GasLimit -} - -type TxCodec struct{} - -func (TxCodec) Decode(bytes []byte) (Tx, error) { - t := new(Tx) - t.Decode(bytes) - return *t, nil -} - -func (TxCodec) DecodeJSON(bytes []byte) (Tx, error) { - t := new(Tx) - t.DecodeJSON(bytes) - return *t, nil -} diff --git a/server/v2/stf/stf.go b/server/v2/stf/stf.go deleted file mode 100644 index d6cf27115d..0000000000 --- a/server/v2/stf/stf.go +++ /dev/null @@ -1,630 +0,0 @@ -package stf - -import ( - "context" - "errors" - "fmt" - - appmanager "cosmossdk.io/core/app" - appmodulev2 "cosmossdk.io/core/appmodule/v2" - corecontext "cosmossdk.io/core/context" - "cosmossdk.io/core/event" - "cosmossdk.io/core/gas" - "cosmossdk.io/core/header" - "cosmossdk.io/core/log" - "cosmossdk.io/core/router" - "cosmossdk.io/core/store" - "cosmossdk.io/core/transaction" - stfgas "cosmossdk.io/server/v2/stf/gas" - "cosmossdk.io/server/v2/stf/internal" -) - -// Identity defines STF's bytes identity and it's used by STF to store things in its own state. -var Identity = []byte("stf") - -// STF is a struct that manages the state transition component of the app. 
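-// It exposes DeliverBlock as the state transition function, alongside the Simulate, ValidateTx and Query helpers.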
-type STF[T transaction.Tx] struct { - logger log.Logger - - msgRouter Router - queryRouter Router - - doPreBlock func(ctx context.Context, txs []T) error - doBeginBlock func(ctx context.Context) error - doEndBlock func(ctx context.Context) error - doValidatorUpdate func(ctx context.Context) ([]appmodulev2.ValidatorUpdate, error) - - doTxValidation func(ctx context.Context, tx T) error - postTxExec func(ctx context.Context, tx T, success bool) error - - branchFn branchFn // branchFn is a function that given a readonly state it returns a writable version of it. - makeGasMeter makeGasMeterFn - makeGasMeteredState makeGasMeteredStateFn -} - -// NewSTF returns a new STF instance. -func NewSTF[T transaction.Tx]( - logger log.Logger, - msgRouterBuilder *MsgRouterBuilder, - queryRouterBuilder *MsgRouterBuilder, - doPreBlock func(ctx context.Context, txs []T) error, - doBeginBlock func(ctx context.Context) error, - doEndBlock func(ctx context.Context) error, - doTxValidation func(ctx context.Context, tx T) error, - doValidatorUpdate func(ctx context.Context) ([]appmodulev2.ValidatorUpdate, error), - postTxExec func(ctx context.Context, tx T, success bool) error, - branch func(store store.ReaderMap) store.WriterMap, -) (*STF[T], error) { - msgRouter, err := msgRouterBuilder.Build() - if err != nil { - return nil, fmt.Errorf("build msg router: %w", err) - } - queryRouter, err := queryRouterBuilder.Build() - if err != nil { - return nil, fmt.Errorf("build query router: %w", err) - } - - return &STF[T]{ - logger: logger, - msgRouter: msgRouter, - queryRouter: queryRouter, - doPreBlock: doPreBlock, - doBeginBlock: doBeginBlock, - doEndBlock: doEndBlock, - doValidatorUpdate: doValidatorUpdate, - doTxValidation: doTxValidation, - postTxExec: postTxExec, // TODO - branchFn: branch, - makeGasMeter: stfgas.DefaultGasMeter, - makeGasMeteredState: stfgas.DefaultWrapWithGasMeter, - }, nil -} - -// DeliverBlock is our state transition function. -// It takes a read only view of the state to apply the block to, -// executes the block and returns the block results and the new state. -func (s STF[T]) DeliverBlock( - ctx context.Context, - block *appmanager.BlockRequest[T], - state store.ReaderMap, -) (blockResult *appmanager.BlockResponse, newState store.WriterMap, err error) { - // creates a new branchFn state, from the readonly view of the state - // that can be written to. 
- newState = s.branchFn(state) - hi := header.Info{ - Hash: block.Hash, - AppHash: block.AppHash, - ChainID: block.ChainId, - Time: block.Time, - Height: int64(block.Height), - } - // set header info - err = s.setHeaderInfo(newState, hi) - if err != nil { - return nil, nil, fmt.Errorf("unable to set initial header info, %w", err) - } - - exCtx := s.makeContext(ctx, appmanager.ConsensusIdentity, newState, internal.ExecModeFinalize) - exCtx.setHeaderInfo(hi) - consMessagesResponses, err := s.runConsensusMessages(exCtx, block.ConsensusMessages) - if err != nil { - return nil, nil, fmt.Errorf("failed to execute consensus messages: %w", err) - } - - // reset events - exCtx.events = make([]event.Event, 0) - // pre block is called separate from begin block in order to prepopulate state - preBlockEvents, err := s.preBlock(exCtx, block.Txs) - if err != nil { - return nil, nil, err - } - - if err = isCtxCancelled(ctx); err != nil { - return nil, nil, err - } - - // reset events - exCtx.events = make([]event.Event, 0) - - // begin block - var beginBlockEvents []event.Event - if !block.IsGenesis { - // begin block - beginBlockEvents, err = s.beginBlock(exCtx) - if err != nil { - return nil, nil, err - } - } - - // check if we need to return early - if err = isCtxCancelled(ctx); err != nil { - return nil, nil, err - } - - // execute txs - txResults := make([]appmanager.TxResult, len(block.Txs)) - // TODO: skip first tx if vote extensions are enabled (marko) - for i, txBytes := range block.Txs { - // check if we need to return early or continue delivering txs - if err = isCtxCancelled(ctx); err != nil { - return nil, nil, err - } - txResults[i] = s.deliverTx(exCtx, newState, txBytes, transaction.ExecModeFinalize, hi) - } - // reset events - exCtx.events = make([]event.Event, 0) - // end block - endBlockEvents, valset, err := s.endBlock(exCtx) - if err != nil { - return nil, nil, err - } - - return &appmanager.BlockResponse{ - Apphash: nil, - ConsensusMessagesResponse: consMessagesResponses, - ValidatorUpdates: valset, - PreBlockEvents: preBlockEvents, - BeginBlockEvents: beginBlockEvents, - TxResults: txResults, - EndBlockEvents: endBlockEvents, - }, newState, nil -} - -// deliverTx executes a TX and returns the result. -func (s STF[T]) deliverTx( - ctx context.Context, - state store.WriterMap, - tx T, - execMode transaction.ExecMode, - hi header.Info, -) appmanager.TxResult { - // recover in the case of a panic - var recoveryError error - defer func() { - if r := recover(); r != nil { - recoveryError = fmt.Errorf("panic during transaction execution: %s", r) - s.logger.Error("panic during transaction execution", "error", recoveryError) - } - }() - // handle error from GetGasLimit - gasLimit, gasLimitErr := tx.GetGasLimit() - if gasLimitErr != nil { - return appmanager.TxResult{ - Error: gasLimitErr, - } - } - - if recoveryError != nil { - return appmanager.TxResult{ - Error: recoveryError, - } - } - - validateGas, validationEvents, err := s.validateTx(ctx, state, gasLimit, tx) - if err != nil { - return appmanager.TxResult{ - Error: err, - } - } - - execResp, execGas, execEvents, err := s.execTx(ctx, state, gasLimit-validateGas, tx, execMode, hi) - return appmanager.TxResult{ - Events: append(validationEvents, execEvents...), - GasUsed: execGas + validateGas, - GasWanted: gasLimit, - Resp: execResp, - Error: err, - } -} - -// validateTx validates a transaction given the provided WritableState and gas limit. 
-// If the validation is successful, state is committed -func (s STF[T]) validateTx( - ctx context.Context, - state store.WriterMap, - gasLimit uint64, - tx T, -) (gasUsed uint64, events []event.Event, err error) { - validateState := s.branchFn(state) - hi, err := s.getHeaderInfo(validateState) - if err != nil { - return 0, nil, err - } - validateCtx := s.makeContext(ctx, appmanager.RuntimeIdentity, validateState, transaction.ExecModeCheck) - validateCtx.setHeaderInfo(hi) - validateCtx.setGasLimit(gasLimit) - err = s.doTxValidation(validateCtx, tx) - if err != nil { - return 0, nil, err - } - - consumed := validateCtx.meter.Limit() - validateCtx.meter.Remaining() - - return consumed, validateCtx.events, applyStateChanges(state, validateState) -} - -// execTx executes the tx messages on the provided state. If the tx fails then the state is discarded. -func (s STF[T]) execTx( - ctx context.Context, - state store.WriterMap, - gasLimit uint64, - tx T, - execMode transaction.ExecMode, - hi header.Info, -) ([]transaction.Msg, uint64, []event.Event, error) { - execState := s.branchFn(state) - - msgsResp, gasUsed, runTxMsgsEvents, txErr := s.runTxMsgs(ctx, execState, gasLimit, tx, execMode, hi) - if txErr != nil { - // in case of error during message execution, we do not apply the exec state. - // instead we run the post exec handler in a new branchFn from the initial state. - postTxState := s.branchFn(state) - postTxCtx := s.makeContext(ctx, appmanager.RuntimeIdentity, postTxState, execMode) - postTxCtx.setHeaderInfo(hi) - - postTxErr := s.postTxExec(postTxCtx, tx, false) - if postTxErr != nil { - // if the post tx handler fails, then we do not apply any state change to the initial state. - // we just return the exec gas used and a joined error from TX error and post TX error. - return nil, gasUsed, nil, errors.Join(txErr, postTxErr) - } - // in case post tx is successful, then we commit the post tx state to the initial state, - // and we return post tx events alongside exec gas used and the error of the tx. - applyErr := applyStateChanges(state, postTxState) - if applyErr != nil { - return nil, 0, nil, applyErr - } - return nil, gasUsed, postTxCtx.events, txErr - } - // tx execution went fine, now we use the same state to run the post tx exec handler, - // in case the execution of the post tx fails, then no state change is applied and the - // whole execution step is rolled back. - postTxCtx := s.makeContext(ctx, appmanager.RuntimeIdentity, execState, execMode) // NO gas limit. - postTxCtx.setHeaderInfo(hi) - postTxErr := s.postTxExec(postTxCtx, tx, true) - if postTxErr != nil { - // if post tx fails, then we do not apply any state change, we return the post tx error, - // alongside the gas used. - return nil, gasUsed, nil, postTxErr - } - // both the execution and post tx execution step were successful, so we apply the state changes - // to the provided state, and we return responses, and events from exec tx and post tx exec. - applyErr := applyStateChanges(state, execState) - if applyErr != nil { - return nil, 0, nil, applyErr - } - - return msgsResp, gasUsed, append(runTxMsgsEvents, postTxCtx.events...), nil -} - -// runTxMsgs will execute the messages contained in the TX with the provided state. 
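-// Messages are executed sequentially; the first failure aborts execution and returns its error.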
-func (s STF[T]) runTxMsgs(
-	ctx context.Context,
-	state store.WriterMap,
-	gasLimit uint64,
-	tx T,
-	execMode transaction.ExecMode,
-	hi header.Info,
-) ([]transaction.Msg, uint64, []event.Event, error) {
-	txSenders, err := tx.GetSenders()
-	if err != nil {
-		return nil, 0, nil, err
-	}
-	msgs, err := tx.GetMessages()
-	if err != nil {
-		return nil, 0, nil, err
-	}
-	msgResps := make([]transaction.Msg, len(msgs))
-
-	execCtx := s.makeContext(ctx, nil, state, execMode)
-	execCtx.setHeaderInfo(hi)
-	execCtx.setGasLimit(gasLimit)
-	for i, msg := range msgs {
-		execCtx.sender = txSenders[i]
-		resp, err := s.msgRouter.InvokeUntyped(execCtx, msg)
-		if err != nil {
-			return nil, 0, nil, fmt.Errorf("message execution at index %d failed: %w", i, err)
-		}
-		msgResps[i] = resp
-	}
-
-	consumed := execCtx.meter.Limit() - execCtx.meter.Remaining()
-	return msgResps, consumed, execCtx.events, nil
-}
-
-func (s STF[T]) preBlock(
-	ctx *executionContext,
-	txs []T,
-) ([]event.Event, error) {
-	err := s.doPreBlock(ctx, txs)
-	if err != nil {
-		return nil, err
-	}
-
-	for i, e := range ctx.events {
-		ctx.events[i].Attributes = append(
-			e.Attributes,
-			event.Attribute{Key: "mode", Value: "PreBlock"},
-		)
-	}
-
-	return ctx.events, nil
-}
-
-func (s STF[T]) runConsensusMessages(
-	ctx *executionContext,
-	messages []transaction.Msg,
-) ([]transaction.Msg, error) {
-	responses := make([]transaction.Msg, len(messages))
-	for i := range messages {
-		resp, err := s.msgRouter.InvokeUntyped(ctx, messages[i])
-		if err != nil {
-			return nil, err
-		}
-		responses[i] = resp
-	}
-
-	return responses, nil
-}
-
-func (s STF[T]) beginBlock(
-	ctx *executionContext,
-) (beginBlockEvents []event.Event, err error) {
-	err = s.doBeginBlock(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	for i, e := range ctx.events {
-		ctx.events[i].Attributes = append(
-			e.Attributes,
-			event.Attribute{Key: "mode", Value: "BeginBlock"},
-		)
-	}
-
-	return ctx.events, nil
-}
-
-func (s STF[T]) endBlock(
-	ctx *executionContext,
-) ([]event.Event, []appmodulev2.ValidatorUpdate, error) {
-	err := s.doEndBlock(ctx)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	events, valsetUpdates, err := s.validatorUpdates(ctx)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	ctx.events = append(ctx.events, events...)
-
-	for i, e := range ctx.events {
-		ctx.events[i].Attributes = append(
-			e.Attributes,
-			event.Attribute{Key: "mode", Value: "EndBlock"},
-		)
-	}
-
-	return ctx.events, valsetUpdates, nil
-}
-
-// validatorUpdates returns the validator updates for the current block. It is called by endBlock after the end-block execution has concluded.
-func (s STF[T]) validatorUpdates(
-	ctx *executionContext,
-) ([]event.Event, []appmodulev2.ValidatorUpdate, error) {
-	valSetUpdates, err := s.doValidatorUpdate(ctx)
-	if err != nil {
-		return nil, nil, err
-	}
-	return ctx.events, valSetUpdates, nil
-}
-
-// Simulate simulates the execution of a tx on the provided state.
-func (s STF[T]) Simulate(
-	ctx context.Context,
-	state store.ReaderMap,
-	gasLimit uint64,
-	tx T,
-) (appmanager.TxResult, store.WriterMap) {
-	simulationState := s.branchFn(state)
-	hi, err := s.getHeaderInfo(simulationState)
-	if err != nil {
-		return appmanager.TxResult{}, nil
-	}
-	txr := s.deliverTx(ctx, simulationState, tx, internal.ExecModeSimulate, hi)
-
-	return txr, simulationState
-}
-
-// ValidateTx will run only the validation steps required for a transaction.
-// Validations are run over the provided state, with the provided gas limit.
-func (s STF[T]) ValidateTx( - ctx context.Context, - state store.ReaderMap, - gasLimit uint64, - tx T, -) appmanager.TxResult { - validationState := s.branchFn(state) - gasUsed, events, err := s.validateTx(ctx, validationState, gasLimit, tx) - return appmanager.TxResult{ - Events: events, - GasUsed: gasUsed, - Error: err, - } -} - -// Query executes the query on the provided state with the provided gas limits. -func (s STF[T]) Query( - ctx context.Context, - state store.ReaderMap, - gasLimit uint64, - req transaction.Msg, -) (transaction.Msg, error) { - queryState := s.branchFn(state) - hi, err := s.getHeaderInfo(queryState) - if err != nil { - return nil, err - } - queryCtx := s.makeContext(ctx, nil, queryState, internal.ExecModeSimulate) - queryCtx.setHeaderInfo(hi) - queryCtx.setGasLimit(gasLimit) - return s.queryRouter.InvokeUntyped(queryCtx, req) -} - -// RunWithCtx is made to support genesis, if genesis was just the execution of messages instead -// of being something custom then we would not need this. PLEASE DO NOT USE. -// TODO: Remove -func (s STF[T]) RunWithCtx( - ctx context.Context, - state store.ReaderMap, - closure func(ctx context.Context) error, -) (store.WriterMap, error) { - branchedState := s.branchFn(state) - stfCtx := s.makeContext(ctx, nil, branchedState, internal.ExecModeFinalize) - return branchedState, closure(stfCtx) -} - -// clone clones STF. -func (s STF[T]) clone() STF[T] { - return STF[T]{ - logger: s.logger, - msgRouter: s.msgRouter, - queryRouter: s.queryRouter, - doPreBlock: s.doPreBlock, - doBeginBlock: s.doBeginBlock, - doEndBlock: s.doEndBlock, - doValidatorUpdate: s.doValidatorUpdate, - doTxValidation: s.doTxValidation, - postTxExec: s.postTxExec, - branchFn: s.branchFn, - makeGasMeter: s.makeGasMeter, - makeGasMeteredState: s.makeGasMeteredState, - } -} - -// executionContext is a struct that holds the context for the execution of a tx. -type executionContext struct { - context.Context - - // unmeteredState is storage without metering. Changes here are propagated to state which is the metered - // version. - unmeteredState store.WriterMap - // state is the gas metered state. - state store.WriterMap - // meter is the gas meter. - meter gas.Meter - // events are the current events. - events []event.Event - // sender is the causer of the state transition. - sender transaction.Identity - // headerInfo contains the block info. - headerInfo header.Info - // execMode retains information about the exec mode. - execMode transaction.ExecMode - - branchFn branchFn - makeGasMeter makeGasMeterFn - makeGasMeteredStore makeGasMeteredStateFn - - msgRouter router.Service - queryRouter router.Service -} - -// setHeaderInfo sets the header info in the state to be used by queries in the future. -func (e *executionContext) setHeaderInfo(hi header.Info) { - e.headerInfo = hi -} - -// setGasLimit will update the gas limit of the *executionContext -func (e *executionContext) setGasLimit(limit uint64) { - meter := e.makeGasMeter(limit) - meteredState := e.makeGasMeteredStore(meter, e.unmeteredState) - - e.meter = meter - e.state = meteredState -} - -// TODO: too many calls to makeContext can be expensive -// makeContext creates and returns a new execution context for the STF[T] type. -// It takes in the following parameters: -// - ctx: The context.Context object for the execution. -// - sender: The transaction.Identity object representing the sender of the transaction. -// - state: The store.WriterMap object for accessing and modifying the state. 
-// - execMode: The transaction.ExecMode value representing the execution mode.
-//
-// It returns a pointer to the executionContext struct.
-func (s STF[T]) makeContext(
-	ctx context.Context,
-	sender transaction.Identity,
-	store store.WriterMap,
-	execMode transaction.ExecMode,
-) *executionContext {
-	valuedCtx := context.WithValue(ctx, corecontext.ExecModeKey, execMode)
-	return newExecutionContext(
-		valuedCtx,
-		s.makeGasMeter,
-		s.makeGasMeteredState,
-		s.branchFn,
-		sender,
-		store,
-		execMode,
-		s.msgRouter,
-		s.queryRouter,
-	)
-}
-
-func newExecutionContext(
-	ctx context.Context,
-	makeGasMeterFn makeGasMeterFn,
-	makeGasMeteredStoreFn makeGasMeteredStateFn,
-	branchFn branchFn,
-	sender transaction.Identity,
-	state store.WriterMap,
-	execMode transaction.ExecMode,
-	msgRouter Router,
-	queryRouter Router,
-) *executionContext {
-	meter := makeGasMeterFn(gas.NoGasLimit)
-	meteredState := makeGasMeteredStoreFn(meter, state)
-
-	return &executionContext{
-		Context:             ctx,
-		unmeteredState:      state,
-		state:               meteredState,
-		meter:               meter,
-		events:              make([]event.Event, 0),
-		sender:              sender,
-		headerInfo:          header.Info{},
-		execMode:            execMode,
-		branchFn:            branchFn,
-		makeGasMeter:        makeGasMeterFn,
-		makeGasMeteredStore: makeGasMeteredStoreFn,
-		msgRouter:           msgRouter,
-		queryRouter:         queryRouter,
-	}
-}
-
-// applyStateChanges applies the state changes from the source store to the destination store.
-// It retrieves the state changes from the source store using the GetStateChanges method,
-// and then applies those changes to the destination store using the ApplyStateChanges method.
-// If an error occurs during the retrieval or application of state changes, it is returned.
-func applyStateChanges(dst, src store.WriterMap) error {
-	changes, err := src.GetStateChanges()
-	if err != nil {
-		return err
-	}
-	return dst.ApplyStateChanges(changes)
-}
-
-// isCtxCancelled reports if the context was canceled.
-func isCtxCancelled(ctx context.Context) error {
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	default:
-		return nil
-	}
-}
diff --git a/server/v2/stf/stf_router.go b/server/v2/stf/stf_router.go
deleted file mode 100644
index b54c537f85..0000000000
--- a/server/v2/stf/stf_router.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package stf
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-
-	gogoproto "github.com/cosmos/gogoproto/proto"
-
-	appmodulev2 "cosmossdk.io/core/appmodule/v2"
-	"cosmossdk.io/core/router"
-)
-
-var ErrNoHandler = errors.New("no handler")
-
-// NewMsgRouterBuilder returns a builder for a router that routes messages to their respective handlers.
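-// Handlers are registered by message type name and composed with their pre- and post-handlers at Build time.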
-func NewMsgRouterBuilder() *MsgRouterBuilder {
-	return &MsgRouterBuilder{
-		handlers:     make(map[string]appmodulev2.Handler),
-		preHandlers:  make(map[string][]appmodulev2.PreMsgHandler),
-		postHandlers: make(map[string][]appmodulev2.PostMsgHandler),
-	}
-}
-
-type MsgRouterBuilder struct {
-	handlers           map[string]appmodulev2.Handler
-	globalPreHandlers  []appmodulev2.PreMsgHandler
-	preHandlers        map[string][]appmodulev2.PreMsgHandler
-	postHandlers       map[string][]appmodulev2.PostMsgHandler
-	globalPostHandlers []appmodulev2.PostMsgHandler
-}
-
-func (b *MsgRouterBuilder) RegisterHandler(msgType string, handler appmodulev2.Handler) error {
-	// error on override
-	if _, ok := b.handlers[msgType]; ok {
-		return fmt.Errorf("handler already registered: %s", msgType)
-	}
-	b.handlers[msgType] = handler
-	return nil
-}
-
-func (b *MsgRouterBuilder) RegisterGlobalPreHandler(handler appmodulev2.PreMsgHandler) {
-	b.globalPreHandlers = append(b.globalPreHandlers, handler)
-}
-
-func (b *MsgRouterBuilder) RegisterPreHandler(msgType string, handler appmodulev2.PreMsgHandler) {
-	b.preHandlers[msgType] = append(b.preHandlers[msgType], handler)
-}
-
-func (b *MsgRouterBuilder) RegisterPostHandler(msgType string, handler appmodulev2.PostMsgHandler) {
-	b.postHandlers[msgType] = append(b.postHandlers[msgType], handler)
-}
-
-func (b *MsgRouterBuilder) RegisterGlobalPostHandler(handler appmodulev2.PostMsgHandler) {
-	b.globalPostHandlers = append(b.globalPostHandlers, handler)
-}
-
-func (b *MsgRouterBuilder) HandlerExists(msgType string) bool {
-	_, ok := b.handlers[msgType]
-	return ok
-}
-
-func (b *MsgRouterBuilder) Build() (Router, error) {
-	handlers := make(map[string]appmodulev2.Handler)
-
-	globalPreHandler := func(ctx context.Context, msg appmodulev2.Message) error {
-		for _, h := range b.globalPreHandlers {
-			err := h(ctx, msg)
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
-	globalPostHandler := func(ctx context.Context, msg, msgResp appmodulev2.Message) error {
-		for _, h := range b.globalPostHandlers {
-			err := h(ctx, msg, msgResp)
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
-	for msgType, handler := range b.handlers {
-		// find pre handler
-		preHandlers := b.preHandlers[msgType]
-		// find post handler
-		postHandlers := b.postHandlers[msgType]
-		// build the handler
-		handlers[msgType] = buildHandler(handler, preHandlers, globalPreHandler, postHandlers, globalPostHandler)
-	}
-
-	return Router{
-		handlers: handlers,
-	}, nil
-}
-
-func buildHandler(
-	handler appmodulev2.Handler,
-	preHandlers []appmodulev2.PreMsgHandler,
-	globalPreHandler appmodulev2.PreMsgHandler,
-	postHandlers []appmodulev2.PostMsgHandler,
-	globalPostHandler appmodulev2.PostMsgHandler,
-) appmodulev2.Handler {
-	return func(ctx context.Context, msg appmodulev2.Message) (msgResp appmodulev2.Message, err error) {
-		if len(preHandlers) != 0 {
-			for _, preHandler := range preHandlers {
-				if err := preHandler(ctx, msg); err != nil {
-					return nil, err
-				}
-			}
-		}
-		err = globalPreHandler(ctx, msg)
-		if err != nil {
-			return nil, err
-		}
-		msgResp, err = handler(ctx, msg)
-		if err != nil {
-			return nil, err
-		}
-
-		if len(postHandlers) != 0 {
-			for _, postHandler := range postHandlers {
-				if err := postHandler(ctx, msg, msgResp); err != nil {
-					return nil, err
-				}
-			}
-		}
-		err = globalPostHandler(ctx, msg, msgResp)
-		return msgResp, err
-	}
-}
-
-// msgTypeURL returns the TypeURL of a proto message.
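-// For gogoproto messages this is the registered message name, as returned by gogoproto.MessageName.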
-func msgTypeURL(msg gogoproto.Message) string { - return gogoproto.MessageName(msg) -} - -var _ router.Service = (*Router)(nil) - -// Router implements the STF router for msg and query handlers. -type Router struct { - handlers map[string]appmodulev2.Handler -} - -func (r Router) CanInvoke(_ context.Context, typeURL string) error { - _, exists := r.handlers[typeURL] - if !exists { - return fmt.Errorf("%w: %s", ErrNoHandler, typeURL) - } - return nil -} - -func (r Router) InvokeTyped(ctx context.Context, req, resp gogoproto.Message) error { - handlerResp, err := r.InvokeUntyped(ctx, req) - if err != nil { - return err - } - merge(handlerResp, resp) - return nil -} - -func merge(src, dst gogoproto.Message) { - reflect.Indirect(reflect.ValueOf(dst)).Set(reflect.Indirect(reflect.ValueOf(src))) -} - -func (r Router) InvokeUntyped(ctx context.Context, req gogoproto.Message) (res gogoproto.Message, err error) { - typeName := msgTypeURL(req) - handler, exists := r.handlers[typeName] - if !exists { - return nil, fmt.Errorf("%w: %s", ErrNoHandler, typeName) - } - return handler(ctx, req) -} diff --git a/server/v2/stf/stf_test.go b/server/v2/stf/stf_test.go deleted file mode 100644 index a77c126087..0000000000 --- a/server/v2/stf/stf_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package stf - -import ( - "context" - "crypto/sha256" - "fmt" - "testing" - "time" - - "github.com/cosmos/gogoproto/proto" - gogotypes "github.com/cosmos/gogoproto/types" - "github.com/stretchr/testify/require" - - appmanager "cosmossdk.io/core/app" - appmodulev2 "cosmossdk.io/core/appmodule/v2" - coregas "cosmossdk.io/core/gas" - "cosmossdk.io/core/store" - "cosmossdk.io/server/v2/stf/branch" - "cosmossdk.io/server/v2/stf/gas" - "cosmossdk.io/server/v2/stf/mock" -) - -func addMsgHandlerToSTF[T any, PT interface { - *T - proto.Message -}, - U any, UT interface { - *U - proto.Message - }]( - t *testing.T, - stf *STF[mock.Tx], - handler func(ctx context.Context, msg PT) (UT, error), -) { - t.Helper() - msgRouterBuilder := NewMsgRouterBuilder() - err := msgRouterBuilder.RegisterHandler( - msgTypeURL(PT(new(T))), - func(ctx context.Context, msg appmodulev2.Message) (msgResp appmodulev2.Message, err error) { - typedReq := msg.(PT) - typedResp, err := handler(ctx, typedReq) - if err != nil { - return nil, err - } - - return typedResp, nil - }, - ) - require.NoError(t, err) - - msgRouter, err := msgRouterBuilder.Build() - require.NoError(t, err) - stf.msgRouter = msgRouter -} - -func TestSTF(t *testing.T) { - state := mock.DB() - mockTx := mock.Tx{ - Sender: []byte("sender"), - Msg: &gogotypes.BoolValue{Value: true}, - GasLimit: 100_000, - } - - sum := sha256.Sum256([]byte("test-hash")) - - s := &STF[mock.Tx]{ - doPreBlock: func(ctx context.Context, txs []mock.Tx) error { return nil }, - doBeginBlock: func(ctx context.Context) error { - kvSet(t, ctx, "begin-block") - return nil - }, - doEndBlock: func(ctx context.Context) error { - kvSet(t, ctx, "end-block") - return nil - }, - doValidatorUpdate: func(ctx context.Context) ([]appmodulev2.ValidatorUpdate, error) { return nil, nil }, - doTxValidation: func(ctx context.Context, tx mock.Tx) error { - kvSet(t, ctx, "validate") - return nil - }, - postTxExec: func(ctx context.Context, tx mock.Tx, success bool) error { - kvSet(t, ctx, "post-tx-exec") - return nil - }, - branchFn: branch.DefaultNewWriterMap, - makeGasMeter: gas.DefaultGasMeter, - makeGasMeteredState: gas.DefaultWrapWithGasMeter, - } - - addMsgHandlerToSTF(t, s, func(ctx context.Context, msg *gogotypes.BoolValue) 
(*gogotypes.BoolValue, error) { - kvSet(t, ctx, "exec") - return nil, nil - }) - - t.Run("begin and end block", func(t *testing.T) { - _, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - }, state) - require.NoError(t, err) - stateHas(t, newState, "begin-block") - stateHas(t, newState, "end-block") - }) - - t.Run("basic tx", func(t *testing.T) { - result, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - stateHas(t, newState, "validate") - stateHas(t, newState, "exec") - stateHas(t, newState, "post-tx-exec") - - require.Len(t, result.TxResults, 1) - txResult := result.TxResults[0] - require.NotZero(t, txResult.GasUsed) - require.Equal(t, mockTx.GasLimit, txResult.GasWanted) - }) - - t.Run("exec tx out of gas", func(t *testing.T) { - s := s.clone() - - mockTx := mock.Tx{ - Sender: []byte("sender"), - Msg: &gogotypes.BoolValue{Value: true}, // msg does not matter at all because our handler does nothing. - GasLimit: 0, // NO GAS! - } - - // this handler will propagate the storage error back, we expect - // out of gas immediately at tx validation level. - s.doTxValidation = func(ctx context.Context, tx mock.Tx) error { - w, err := ctx.(*executionContext).state.GetWriter(actorName) - require.NoError(t, err) - err = w.Set([]byte("gas_failure"), []byte{}) - require.Error(t, err) - return err - } - - result, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - stateNotHas(t, newState, "gas_failure") // assert during out of gas no state changes leaked. 
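-		// the tx result is expected to surface the out of gas error.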
- require.ErrorIs(t, result.TxResults[0].Error, coregas.ErrOutOfGas, result.TxResults[0].Error) - }) - - t.Run("fail exec tx", func(t *testing.T) { - // update the stf to fail on the handler - s := s.clone() - addMsgHandlerToSTF(t, &s, func(ctx context.Context, msg *gogotypes.BoolValue) (*gogotypes.BoolValue, error) { - return nil, fmt.Errorf("failure") - }) - - blockResult, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - require.ErrorContains(t, blockResult.TxResults[0].Error, "failure") - stateHas(t, newState, "begin-block") - stateHas(t, newState, "end-block") - stateHas(t, newState, "validate") - stateNotHas(t, newState, "exec") - stateHas(t, newState, "post-tx-exec") - }) - - t.Run("tx is success but post tx failed", func(t *testing.T) { - s := s.clone() - s.postTxExec = func(ctx context.Context, tx mock.Tx, success bool) error { - return fmt.Errorf("post tx failure") - } - blockResult, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - require.ErrorContains(t, blockResult.TxResults[0].Error, "post tx failure") - stateHas(t, newState, "begin-block") - stateHas(t, newState, "end-block") - stateHas(t, newState, "validate") - stateNotHas(t, newState, "exec") - stateNotHas(t, newState, "post-tx-exec") - }) - - t.Run("tx failed and post tx failed", func(t *testing.T) { - s := s.clone() - addMsgHandlerToSTF(t, &s, func(ctx context.Context, msg *gogotypes.BoolValue) (*gogotypes.BoolValue, error) { - return nil, fmt.Errorf("exec failure") - }) - s.postTxExec = func(ctx context.Context, tx mock.Tx, success bool) error { return fmt.Errorf("post tx failure") } - blockResult, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - require.ErrorContains(t, blockResult.TxResults[0].Error, "exec failure\npost tx failure") - stateHas(t, newState, "begin-block") - stateHas(t, newState, "end-block") - stateHas(t, newState, "validate") - stateNotHas(t, newState, "exec") - stateNotHas(t, newState, "post-tx-exec") - }) - - t.Run("fail validate tx", func(t *testing.T) { - // update stf to fail on the validation step - s := s.clone() - s.doTxValidation = func(ctx context.Context, tx mock.Tx) error { return fmt.Errorf("failure") } - blockResult, newState, err := s.DeliverBlock(context.Background(), &appmanager.BlockRequest[mock.Tx]{ - Height: uint64(1), - Time: time.Date(2024, 2, 3, 18, 23, 0, 0, time.UTC), - AppHash: sum[:], - Hash: sum[:], - Txs: []mock.Tx{mockTx}, - }, state) - require.NoError(t, err) - require.ErrorContains(t, blockResult.TxResults[0].Error, "failure") - stateHas(t, newState, "begin-block") - stateHas(t, newState, "end-block") - stateNotHas(t, newState, "validate") - stateNotHas(t, newState, "exec") - }) -} - -var actorName = []byte("cookies") - -func kvSet(t *testing.T, ctx context.Context, v string) { - t.Helper() - state, err := ctx.(*executionContext).state.GetWriter(actorName) - require.NoError(t, err) - require.NoError(t, state.Set([]byte(v), 
[]byte(v)))
-}
-
-func stateHas(t *testing.T, accountState store.ReaderMap, key string) {
-	t.Helper()
-	state, err := accountState.GetReader(actorName)
-	require.NoError(t, err)
-	has, err := state.Has([]byte(key))
-	require.NoError(t, err)
-	require.Truef(t, has, "state did not have key: %s", key)
-}
-
-func stateNotHas(t *testing.T, accountState store.ReaderMap, key string) {
-	t.Helper()
-	state, err := accountState.GetReader(actorName)
-	require.NoError(t, err)
-	has, err := state.Has([]byte(key))
-	require.NoError(t, err)
-	require.Falsef(t, has, "state was not supposed to have key: %s", key)
-}
diff --git a/server/v2/streaming/README.md b/server/v2/streaming/README.md
deleted file mode 100644
index a06f8685d3..0000000000
--- a/server/v2/streaming/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Cosmos-SDK Plugins
-
-This package contains an extensible plugin system for the Cosmos-SDK. The plugin system leverages the [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) system. This system is designed to work over RPC.
-
-Although `go-plugin` is built to work over RPC, it is currently only designed to work over a local network.
-
-## Prerequisites
-
-For an overview of the features supported by the `go-plugin` system, please see https://github.com/hashicorp/go-plugin. The `go-plugin` documentation is located [here](https://github.com/hashicorp/go-plugin/tree/master/docs). You can also directly visit any of the links below:
-
-* [Writing plugins without Go](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md)
-* [Go Plugin Tutorial](https://github.com/hashicorp/go-plugin/blob/master/docs/extensive-go-plugin-tutorial.md)
-* [Plugin Internals](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md)
-* [Plugin Architecture](https://www.youtube.com/watch?v=SRvm3zQQc1Q) (start here)
-
-## Exposing plugins
-
-To expose plugins to the plugin system, you will need to:
-
-1. Implement the gRPC message protocol service of the plugin
-2. Build the plugin binary
-3. Export it
-
-Read the plugin documentation in the [Streaming Plugins](#streaming-plugins) section for examples on how to build a plugin.
-
-## Streaming Plugins
-
-List of supported streaming plugins:
-
-* [State Streaming Plugin](plugin.md)
diff --git a/server/v2/streaming/config.go b/server/v2/streaming/config.go
deleted file mode 100644
index 5748e19e3f..0000000000
--- a/server/v2/streaming/config.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package streaming
-
-// State Streaming configuration
-
-// StreamingConfig defines application configuration for external streaming services
-type StreamingConfig struct {
-	ListenerConfig ListenerConfig `mapstructure:"listener-config" toml:"listener-config" comment:"ListenerConfig defines application configuration for ABCIListener streaming service"`
-}
-
-// ListenerConfig defines application configuration for ABCIListener streaming service
-type ListenerConfig struct {
-	// List of kv store keys to stream out via gRPC.
-	// The store key names MUST match the module's StoreKey name.
-	//
-	// Example:
-	// ["acc", "bank", "gov", "staking", "mint"[,...]]
-	// ["*"] to expose all keys.
-	Keys []string `mapstructure:"keys" toml:"keys" comment:"List of kv store keys to stream out via gRPC. The store key names MUST match the module's StoreKey name. Example: [\"acc\", \"bank\", \"gov\", \"staking\", \"mint\"[,...]] [\"*\"] to expose all keys."`
-	// The plugin name used for streaming via gRPC.
-	// Streaming is only enabled if this is set.
diff --git a/server/v2/streaming/config.go b/server/v2/streaming/config.go deleted file mode 100644 index 5748e19e3f..0000000000 --- a/server/v2/streaming/config.go +++ /dev/null @@ -1,25 +0,0 @@ -package streaming - -// State Streaming configuration - -// StreamingConfig defines application configuration for external streaming services -type StreamingConfig struct { - ListenerConfig ListenerConfig `mapstructure:"listener-config" toml:"listener-config" comment:"ListenerConfig defines application configuration for ABCIListener streaming service"` -} - -// ListenerConfig defines application configuration for ABCIListener streaming service -type ListenerConfig struct { - // List of kv store keys to stream out via gRPC. - // The store key names MUST match the module's StoreKey name. - // - // Example: - // ["acc", "bank", "gov", "staking", "mint"[,...]] - // ["*"] to expose all keys. - Keys []string `mapstructure:"keys" toml:"keys" comment:"List of kv store keys to stream out via gRPC. The store key names MUST match the module's StoreKey name. Example: [\"acc\", \"bank\", \"gov\", \"staking\", \"mint\"[,...]] [\"*\"] to expose all keys."` - // The plugin name used for streaming via gRPC. - // Streaming is only enabled if this is set. - // Supported plugins: abci - Plugin string `mapstructure:"plugin" toml:"plugin" comment:"The plugin name used for streaming via gRPC. Streaming is only enabled if this is set. Supported plugins: abci"` - // stop-node-on-err specifies whether to stop the node on message delivery error. - StopNodeOnErr bool `mapstructure:"stop-node-on-err" toml:"stop-node-on-err" comment:"stop-node-on-err specifies whether to stop the node on message delivery error."` -} diff --git a/server/v2/streaming/context.go b/server/v2/streaming/context.go deleted file mode 100644 index 881f0c5e45..0000000000 --- a/server/v2/streaming/context.go +++ /dev/null @@ -1,11 +0,0 @@ -package streaming - -import "cosmossdk.io/core/log" - -// Context is an interface used by an App to pass context information -// needed to process store streaming requests. -type Context interface { - BlockHeight() int64 - Logger() log.Logger - StreamingManager() Manager -}
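As a hypothetical illustration of how the two config types above fit together (the field values are made up; with the `toml` tags shown, the same shape round-trips through a `listener-config` table in the app's TOML configuration):

```go
package main

import (
	"fmt"

	"cosmossdk.io/server/v2/streaming"
)

func main() {
	cfg := streaming.StreamingConfig{
		ListenerConfig: streaming.ListenerConfig{
			Keys:          []string{"acc", "bank"}, // or []string{"*"} to stream every store key
			Plugin:        "abci",                  // streaming stays disabled while this is empty
			StopNodeOnErr: true,                    // halt the node if a listener hook errors
		},
	}
	fmt.Printf("%+v\n", cfg)
}
```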
diff --git a/server/v2/streaming/examples/file/.gitignore b/server/v2/streaming/examples/file/.gitignore deleted file mode 100644 index bc8ff79063..0000000000 --- a/server/v2/streaming/examples/file/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# ignore the file plugin binary -file \ No newline at end of file diff --git a/server/v2/streaming/examples/file/README.md b/server/v2/streaming/examples/file/README.md deleted file mode 100644 index 27e5f8956e..0000000000 --- a/server/v2/streaming/examples/file/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# File Plugin - -The file plugin is an example plugin written in Go. It is intended for local testing and should not be used in production environments. - -## Build - -To build the plugin, run the following commands: - -```shell -cd store -``` - -```shell -go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go -``` - -* The plugin will write files to the user's home directory `~/`. diff --git a/server/v2/streaming/examples/file/file.go b/server/v2/streaming/examples/file/file.go deleted file mode 100644 index 607cb8ed62..0000000000 --- a/server/v2/streaming/examples/file/file.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "cosmossdk.io/server/v2/streaming" - "github.com/hashicorp/go-plugin" -) - -// FilePlugin is the implementation of the baseapp.ABCIListener interface. -// For Go plugins this is all that is required to process data sent over gRPC. -type FilePlugin struct { - BlockHeight int64 -} - -func (a *FilePlugin) writeToFile(file string, data []byte) error { - home, err := os.UserHomeDir() - if err != nil { - return err - } - - filename := fmt.Sprintf("%s/%s.txt", home, file) - f, err := os.OpenFile(filepath.Clean(filename), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600) - if err != nil { - return err - } - - if _, err := f.Write(data); err != nil { - f.Close() // ignore error; Write error takes precedence - return err - } - - if err := f.Close(); err != nil { - return err - } - - return nil -} - -func (a *FilePlugin) ListenDeliverBlock(ctx context.Context, req streaming.ListenDeliverBlockRequest) error { - d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req)) - d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req)) - if err := a.writeToFile("finalize-block-req", d1); err != nil { - return err - } - if err := a.writeToFile("finalize-block-res", d2); err != nil { - return err - } - return nil -} - -func (a *FilePlugin) ListenStateChanges(ctx context.Context, changeSet []*streaming.StoreKVPair) error { - fmt.Printf("listen-commit: block_height=%d data=%v", a.BlockHeight, changeSet) - d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, nil)) - d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, changeSet)) - if err := a.writeToFile("commit-res", d1); err != nil { - return err - } - if err := a.writeToFile("state-change", d2); err != nil { - return err - } - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: streaming.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &streaming.ListenerGRPCPlugin{Impl: &FilePlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/server/v2/streaming/examples/stdout/stdout b/server/v2/streaming/examples/stdout/stdout deleted file mode 100755 index 93f61a7b93..0000000000 Binary files a/server/v2/streaming/examples/stdout/stdout and /dev/null differ diff --git a/server/v2/streaming/examples/stdout/stdout.go b/server/v2/streaming/examples/stdout/stdout.go deleted file mode 100644 index b09b9acf10..0000000000 --- a/server/v2/streaming/examples/stdout/stdout.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/hashicorp/go-plugin" - - "cosmossdk.io/server/v2/streaming" -) - -// StdoutPlugin is the implementation of the ABCIListener interface. -// For Go plugins this is all that is required to process data sent over gRPC. -type StdoutPlugin struct { - BlockHeight int64 -} - -func (a *StdoutPlugin) ListenDeliverBlock(ctx context.Context, req streaming.ListenDeliverBlockRequest) error { - a.BlockHeight = req.BlockHeight - // process tx messages (i.e., sent to an external system) - fmt.Printf("listen-finalize-block: block-height=%d req=%v res=%v", a.BlockHeight, req, nil) - return nil -} - -func (a *StdoutPlugin) ListenStateChanges(ctx context.Context, changeSet []*streaming.StoreKVPair) error { - // process block commit messages (i.e., sent to an external system) - fmt.Printf("listen-commit: block_height=%d res=%v data=%v", a.BlockHeight, changeSet, nil) - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: streaming.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &streaming.ListenerGRPCPlugin{Impl: &StdoutPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -}
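For context, the host side consumed these example binaries through the standard `go-plugin` client flow. A sketch, assuming the removed `streaming` package and a locally built `./file` binary (the path and error handling are illustrative):

```go
package main

import (
	"log"
	"os/exec"

	"github.com/hashicorp/go-plugin"

	"cosmossdk.io/server/v2/streaming"
)

func main() {
	// Launch the plugin binary; host and plugin must agree on
	// streaming.Handshake and on the "abci" plugin name.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  streaming.Handshake,
		Plugins:          map[string]plugin.Plugin{"abci": &streaming.ListenerGRPCPlugin{}},
		Cmd:              exec.Command("./file"),
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}

	// Dispense returns the GRPCClient defined in grpc.go below, typed as
	// the streaming.Listener interface.
	raw, err := rpcClient.Dispense("abci")
	if err != nil {
		log.Fatal(err)
	}
	_ = raw.(streaming.Listener) // ready for ListenDeliverBlock / ListenStateChanges
}
```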
diff --git a/server/v2/streaming/grpc.go b/server/v2/streaming/grpc.go deleted file mode 100644 index 38d9c4320b..0000000000 --- a/server/v2/streaming/grpc.go +++ /dev/null @@ -1,77 +0,0 @@ -package streaming - -import ( - "context" - "os" - - "github.com/hashicorp/go-plugin" -) - -var _ Listener = (*GRPCClient)(nil) - -// GRPCClient is an implementation of the ABCIListener interface that talks over RPC. -type GRPCClient struct { - client ListenerServiceClient -} - -// ListenDeliverBlock listens for block delivery requests and responses. -// It retrieves a types.Context from the provided context.Context. -// If the node is configured to stop on listening errors, it will terminate -// and exit with a non-zero code upon encountering an error. -// -// Panics if a types.Context is not properly attached to the provided context.Context. -func (m *GRPCClient) ListenDeliverBlock(goCtx context.Context, req ListenDeliverBlockRequest) error { - ctx := goCtx.(Context) - sm := ctx.StreamingManager() - _, err := m.client.ListenDeliverBlock(goCtx, &req) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("DeliverBlock listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -// ListenStateChanges listens for state changes in the current block. -// It retrieves a types.Context from the provided context.Context. -// If the node is configured to stop on listening errors, it will terminate -// and exit with a non-zero code upon encountering an error. -// -// Panics if a types.Context is not properly attached to the provided context.Context. -func (m *GRPCClient) ListenStateChanges(goCtx context.Context, changeSet []*StoreKVPair) error { - ctx := goCtx.(Context) - sm := ctx.StreamingManager() - request := &ListenStateChangesRequest{BlockHeight: ctx.BlockHeight(), ChangeSet: changeSet} - _, err := m.client.ListenStateChanges(goCtx, request) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("Commit listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -func cleanupAndExit() { - plugin.CleanupClients() - os.Exit(1) -} - -var _ ListenerServiceServer = (*GRPCServer)(nil) - -// GRPCServer is the gRPC server that GRPCClient talks to. -type GRPCServer struct { - // This is the real implementation - Impl Listener -} - -func (m GRPCServer) ListenDeliverBlock(ctx context.Context, request *ListenDeliverBlockRequest) (*ListenDeliverBlockResponse, error) { - if err := m.Impl.ListenDeliverBlock(ctx, *request); err != nil { - return nil, err - } - return &ListenDeliverBlockResponse{}, nil -} - -func (m GRPCServer) ListenStateChanges(ctx context.Context, request *ListenStateChangesRequest) (*ListenStateChangesResponse, error) { - if err := m.Impl.ListenStateChanges(ctx, request.ChangeSet); err != nil { - return nil, err - } - return &ListenStateChangesResponse{}, nil -} diff --git a/server/v2/streaming/grpc.pb.go b/server/v2/streaming/grpc.pb.go deleted file mode 100644 index 4d2d15149c..0000000000 --- a/server/v2/streaming/grpc.pb.go +++ /dev/null @@ -1,2414 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: cosmos/streaming/v1/grpc.proto - -package streaming - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ListenDeliverBlockRequest is the request type for the ListenDeliverBlock RPC method -type ListenDeliverBlockRequest struct { - BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` - Events []*Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` - TxResults []*ExecTxResult `protobuf:"bytes,4,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` -} - -func (m *ListenDeliverBlockRequest) Reset() { *m = ListenDeliverBlockRequest{} } -func (m *ListenDeliverBlockRequest) String() string { return proto.CompactTextString(m) } -func (*ListenDeliverBlockRequest) ProtoMessage() {} -func (*ListenDeliverBlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{0} -} -func (m *ListenDeliverBlockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenDeliverBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenDeliverBlockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenDeliverBlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenDeliverBlockRequest.Merge(m, src) -} -func (m *ListenDeliverBlockRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenDeliverBlockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenDeliverBlockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenDeliverBlockRequest proto.InternalMessageInfo - -func (m *ListenDeliverBlockRequest) GetBlockHeight() int64 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ListenDeliverBlockRequest) GetTxs() [][]byte { - if m != nil { - return m.Txs - } - return nil -} - -func (m *ListenDeliverBlockRequest) GetEvents() []*Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *ListenDeliverBlockRequest) GetTxResults() []*ExecTxResult { - if m != nil { - return m.TxResults - } - return nil -} - -// ListenDeliverBlockResponse is the response type for the ListenDeliverBlock RPC method -type ListenDeliverBlockResponse struct { -} - -func (m *ListenDeliverBlockResponse) Reset() { *m = ListenDeliverBlockResponse{} } -func (m *ListenDeliverBlockResponse) String() string { return proto.CompactTextString(m) } -func (*ListenDeliverBlockResponse) ProtoMessage() {} -func (*ListenDeliverBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{1} -} -func (m *ListenDeliverBlockResponse) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenDeliverBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenDeliverBlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenDeliverBlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenDeliverBlockResponse.Merge(m, src) -} -func (m *ListenDeliverBlockResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenDeliverBlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenDeliverBlockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenDeliverBlockResponse proto.InternalMessageInfo - -// ListenStateChangesRequest is the request type for the ListenStateChanges RPC method -type ListenStateChangesRequest struct { - BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - ChangeSet []*StoreKVPair `protobuf:"bytes,2,rep,name=change_set,json=changeSet,proto3" json:"change_set,omitempty"` - AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` -} - -func (m *ListenStateChangesRequest) Reset() { *m = ListenStateChangesRequest{} } -func (m *ListenStateChangesRequest) String() string { return proto.CompactTextString(m) } -func (*ListenStateChangesRequest) ProtoMessage() {} -func (*ListenStateChangesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{2} -} -func (m *ListenStateChangesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenStateChangesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenStateChangesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenStateChangesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenStateChangesRequest.Merge(m, src) -} -func (m *ListenStateChangesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenStateChangesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenStateChangesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenStateChangesRequest proto.InternalMessageInfo - -func (m *ListenStateChangesRequest) GetBlockHeight() int64 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ListenStateChangesRequest) GetChangeSet() []*StoreKVPair { - if m != nil { - return m.ChangeSet - } - return nil -} - -func (m *ListenStateChangesRequest) GetAppHash() []byte { - if m != nil { - return m.AppHash - } - return nil -} - -// ListenStateChangesResponse is the response type for the ListenStateChanges RPC method -type ListenStateChangesResponse struct { -} - -func (m *ListenStateChangesResponse) Reset() { *m = ListenStateChangesResponse{} } -func (m *ListenStateChangesResponse) String() string { return proto.CompactTextString(m) } -func (*ListenStateChangesResponse) ProtoMessage() {} -func (*ListenStateChangesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{3} -} -func (m *ListenStateChangesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenStateChangesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_ListenStateChangesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenStateChangesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenStateChangesResponse.Merge(m, src) -} -func (m *ListenStateChangesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenStateChangesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenStateChangesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenStateChangesResponse proto.InternalMessageInfo - -// StoreKVPair is a single key-value pair, associated with a store. -type StoreKVPair struct { - // address defines the address of the account the state changes are coming from. - // In case of modules you can expect a stringified - Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - // key defines the key of the address that changed. - Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - // value defines the value that changed, empty in case of removal. - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - // delete defines if the key was removed. - Delete bool `protobuf:"varint,4,opt,name=delete,proto3" json:"delete,omitempty"` -} - -func (m *StoreKVPair) Reset() { *m = StoreKVPair{} } -func (m *StoreKVPair) String() string { return proto.CompactTextString(m) } -func (*StoreKVPair) ProtoMessage() {} -func (*StoreKVPair) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{4} -} -func (m *StoreKVPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StoreKVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StoreKVPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StoreKVPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_StoreKVPair.Merge(m, src) -} -func (m *StoreKVPair) XXX_Size() int { - return m.Size() -} -func (m *StoreKVPair) XXX_DiscardUnknown() { - xxx_messageInfo_StoreKVPair.DiscardUnknown(m) -} - -var xxx_messageInfo_StoreKVPair proto.InternalMessageInfo - -func (m *StoreKVPair) GetAddress() []byte { - if m != nil { - return m.Address - } - return nil -} - -func (m *StoreKVPair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *StoreKVPair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *StoreKVPair) GetDelete() bool { - if m != nil { - return m.Delete - } - return false -} - -// Event is a single event, associated with a transaction. 
-type Event struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Attributes []*EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{5} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *Event) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Event) GetAttributes() []*EventAttribute { - if m != nil { - return m.Attributes - } - return nil -} - -// EventAttribute is a single key-value pair, associated with an event. -type EventAttribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *EventAttribute) Reset() { *m = EventAttribute{} } -func (m *EventAttribute) String() string { return proto.CompactTextString(m) } -func (*EventAttribute) ProtoMessage() {} -func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{6} -} -func (m *EventAttribute) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EventAttribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventAttribute.Merge(m, src) -} -func (m *EventAttribute) XXX_Size() int { - return m.Size() -} -func (m *EventAttribute) XXX_DiscardUnknown() { - xxx_messageInfo_EventAttribute.DiscardUnknown(m) -} - -var xxx_messageInfo_EventAttribute proto.InternalMessageInfo - -func (m *EventAttribute) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *EventAttribute) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// ExecTxResult contains results of executing one individual transaction. 
-type ExecTxResult struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` - Events []*Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` -} - -func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } -func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } -func (*ExecTxResult) ProtoMessage() {} -func (*ExecTxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_3fc151d30622bb2a, []int{7} -} -func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecTxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecTxResult.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecTxResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecTxResult.Merge(m, src) -} -func (m *ExecTxResult) XXX_Size() int { - return m.Size() -} -func (m *ExecTxResult) XXX_DiscardUnknown() { - xxx_messageInfo_ExecTxResult.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecTxResult proto.InternalMessageInfo - -func (m *ExecTxResult) GetCode() uint32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *ExecTxResult) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ExecTxResult) GetLog() string { - if m != nil { - return m.Log - } - return "" -} - -func (m *ExecTxResult) GetInfo() string { - if m != nil { - return m.Info - } - return "" -} - -func (m *ExecTxResult) GetGasWanted() int64 { - if m != nil { - return m.GasWanted - } - return 0 -} - -func (m *ExecTxResult) GetGasUsed() int64 { - if m != nil { - return m.GasUsed - } - return 0 -} - -func (m *ExecTxResult) GetEvents() []*Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *ExecTxResult) GetCodespace() string { - if m != nil { - return m.Codespace - } - return "" -} - -func init() { - proto.RegisterType((*ListenDeliverBlockRequest)(nil), "cosmos.streaming.v1.ListenDeliverBlockRequest") - proto.RegisterType((*ListenDeliverBlockResponse)(nil), "cosmos.streaming.v1.ListenDeliverBlockResponse") - proto.RegisterType((*ListenStateChangesRequest)(nil), "cosmos.streaming.v1.ListenStateChangesRequest") - proto.RegisterType((*ListenStateChangesResponse)(nil), "cosmos.streaming.v1.ListenStateChangesResponse") - proto.RegisterType((*StoreKVPair)(nil), "cosmos.streaming.v1.StoreKVPair") - proto.RegisterType((*Event)(nil), "cosmos.streaming.v1.Event") - proto.RegisterType((*EventAttribute)(nil), "cosmos.streaming.v1.EventAttribute") - proto.RegisterType((*ExecTxResult)(nil), "cosmos.streaming.v1.ExecTxResult") -} - -func init() { proto.RegisterFile("cosmos/streaming/v1/grpc.proto", fileDescriptor_3fc151d30622bb2a) } - -var fileDescriptor_3fc151d30622bb2a = []byte{ - // 599 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 
0x6e, 0xd3, 0x40, - 0x10, 0xae, 0x9b, 0x36, 0xa9, 0x27, 0xe1, 0x47, 0x0b, 0x42, 0x6e, 0x55, 0x2c, 0x37, 0x5c, 0x72, - 0x72, 0xd4, 0x70, 0x41, 0x5c, 0x80, 0x16, 0xa4, 0x4a, 0x70, 0x40, 0x1b, 0x7e, 0x24, 0x2e, 0x61, - 0x6b, 0x0f, 0x8e, 0x55, 0xd7, 0x36, 0x3b, 0x6b, 0x93, 0xbe, 0x05, 0x67, 0x1e, 0x08, 0x71, 0xec, - 0x91, 0x23, 0x6a, 0x2f, 0xbc, 0x05, 0x68, 0xd7, 0x6e, 0x93, 0x0a, 0xb7, 0x2a, 0xb7, 0x6f, 0x7e, - 0xbf, 0x6f, 0x3c, 0xe3, 0x05, 0x37, 0xc8, 0xe8, 0x30, 0xa3, 0x21, 0x29, 0x89, 0xe2, 0x30, 0x4e, - 0xa3, 0x61, 0xb9, 0x3d, 0x8c, 0x64, 0x1e, 0xf8, 0xb9, 0xcc, 0x54, 0xc6, 0xee, 0x54, 0x71, 0xff, - 0x3c, 0xee, 0x97, 0xdb, 0xfd, 0xef, 0x16, 0xac, 0xbf, 0x8a, 0x49, 0x61, 0xfa, 0x1c, 0x93, 0xb8, - 0x44, 0xb9, 0x93, 0x64, 0xc1, 0x01, 0xc7, 0xcf, 0x05, 0x92, 0x62, 0x5b, 0xd0, 0xdb, 0xd7, 0xf6, - 0x64, 0x8a, 0x71, 0x34, 0x55, 0x8e, 0xe5, 0x59, 0x83, 0x16, 0xef, 0x1a, 0xdf, 0x9e, 0x71, 0xb1, - 0xdb, 0xd0, 0x52, 0x33, 0x72, 0x96, 0xbd, 0xd6, 0xa0, 0xc7, 0x35, 0x64, 0x23, 0x68, 0x63, 0x89, - 0xa9, 0x22, 0xa7, 0xe5, 0xb5, 0x06, 0xdd, 0xd1, 0x86, 0xdf, 0x40, 0xec, 0xbf, 0xd0, 0x29, 0xbc, - 0xce, 0x64, 0x4f, 0x01, 0xd4, 0x6c, 0x22, 0x91, 0x8a, 0x44, 0x91, 0xb3, 0x62, 0xea, 0xb6, 0x9a, - 0xeb, 0x66, 0x18, 0xbc, 0x99, 0x71, 0x93, 0xc9, 0x6d, 0x55, 0x23, 0xea, 0x6f, 0xc2, 0x46, 0xd3, - 0x1c, 0x94, 0x67, 0x29, 0x61, 0xff, 0xdb, 0xf9, 0x98, 0x63, 0x25, 0x14, 0xee, 0x4e, 0x45, 0x1a, - 0x21, 0xfd, 0xc7, 0x98, 0x4f, 0x00, 0x02, 0x53, 0x34, 0x21, 0x54, 0x66, 0xda, 0xee, 0xc8, 0x6b, - 0x14, 0x38, 0x56, 0x99, 0xc4, 0x97, 0xef, 0x5e, 0x8b, 0x58, 0x72, 0xbb, 0xaa, 0x19, 0xa3, 0x62, - 0xeb, 0xb0, 0x26, 0xf2, 0x7c, 0x32, 0x15, 0x34, 0x75, 0x5a, 0x9e, 0x35, 0xe8, 0xf1, 0x8e, 0xc8, - 0xf3, 0x3d, 0x41, 0xd3, 0xb9, 0xf4, 0x8b, 0xda, 0x6a, 0xe9, 0x11, 0x74, 0x17, 0x5a, 0x32, 0x07, - 0x3a, 0x22, 0x0c, 0x25, 0x12, 0x19, 0x99, 0xba, 0x4d, 0x65, 0xea, 0x4d, 0x1c, 0xe0, 0x91, 0xb3, - 0x6c, 0xbc, 0x1a, 0xb2, 0xbb, 0xb0, 0x5a, 0x8a, 0xa4, 0xc0, 0x9a, 0xb0, 0x32, 0xd8, 0x3d, 0x68, - 0x87, 0x98, 0xa0, 0x42, 0x67, 0xc5, 0xb3, 0x06, 0x6b, 0xbc, 0xb6, 0xfa, 0x1f, 0x61, 0xd5, 0x2c, - 0x85, 0x31, 0x58, 0x51, 0x47, 0x39, 0x9a, 0xfe, 0x36, 0x37, 0x98, 0xed, 0x02, 0x08, 0xa5, 0x64, - 0xbc, 0x5f, 0x28, 0xa4, 0x7a, 0xfe, 0x07, 0x97, 0x2f, 0xf6, 0xd9, 0x59, 0x2e, 0x5f, 0x28, 0xeb, - 0x3f, 0x82, 0x9b, 0x17, 0xa3, 0x67, 0x9a, 0x2b, 0xa6, 0x8b, 0x9a, 0x97, 0x8d, 0xaf, 0x32, 0xfa, - 0xbf, 0x2d, 0xe8, 0x2d, 0x6e, 0x5e, 0x6b, 0x0c, 0xb2, 0xb0, 0xd2, 0x78, 0x83, 0x1b, 0xac, 0x7d, - 0xa1, 0x50, 0xa2, 0xfe, 0x02, 0x06, 0x6b, 0x82, 0x24, 0x8b, 0xcc, 0x07, 0xb0, 0xb9, 0x86, 0x3a, - 0x2b, 0x4e, 0x3f, 0x65, 0x66, 0x78, 0x9b, 0x1b, 0xcc, 0xee, 0x03, 0x44, 0x82, 0x26, 0x5f, 0x44, - 0xaa, 0x30, 0x74, 0x56, 0xcd, 0xfa, 0xed, 0x48, 0xd0, 0x7b, 0xe3, 0xd0, 0xbb, 0xd3, 0xe1, 0x82, - 0x30, 0x74, 0xda, 0x26, 0xd8, 0x89, 0x04, 0xbd, 0x25, 0x0c, 0x17, 0x8e, 0xbd, 0x73, 0xed, 0x63, - 0xdf, 0x04, 0x5b, 0xeb, 0xa5, 0x5c, 0x04, 0xe8, 0xac, 0x19, 0x19, 0x73, 0xc7, 0xe8, 0x8f, 0x05, - 0xb7, 0xaa, 0x73, 0x40, 0x39, 0x46, 0x59, 0xc6, 0x01, 0xb2, 0x02, 0xd8, 0xbf, 0xc7, 0xcd, 0xfc, - 0x46, 0xae, 0x4b, 0xff, 0xe6, 0x8d, 0xe1, 0xb5, 0xf3, 0xab, 0xd3, 0x9b, 0xd3, 0x2e, 0x1e, 0xe6, - 0x95, 0xb4, 0x0d, 0x7f, 0xd7, 0x95, 0xb4, 0x4d, 0x17, 0xbf, 0xf3, 0xf8, 0xc7, 0x89, 0x6b, 0x1d, - 0x9f, 0xb8, 0xd6, 0xaf, 0x13, 0xd7, 0xfa, 0x7a, 0xea, 0x2e, 0x1d, 0x9f, 0xba, 0x4b, 0x3f, 0x4f, - 0xdd, 0xa5, 0x0f, 0x5e, 0xd5, 0x89, 0xc2, 0x03, 0x3f, 0xce, 0x86, 0x84, 0xb2, 0x44, 0x39, 0x2c, - 0x47, 0xf3, 0x27, 0x6f, 0xbf, 0x6d, 0xde, 0xba, 0x87, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x3b, - 0xbd, 
0x03, 0xda, 0x0d, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ListenerServiceClient is the client API for ListenerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ListenerServiceClient interface { - // ListenDeliverBlock is the corresponding endpoint for Listener.ListenDeliverBlock - ListenDeliverBlock(ctx context.Context, in *ListenDeliverBlockRequest, opts ...grpc.CallOption) (*ListenDeliverBlockResponse, error) - // ListenStateChanges is the corresponding endpoint for Listener.ListenStateChanges - ListenStateChanges(ctx context.Context, in *ListenStateChangesRequest, opts ...grpc.CallOption) (*ListenStateChangesResponse, error) -} - -type listenerServiceClient struct { - cc grpc1.ClientConn -} - -func NewListenerServiceClient(cc grpc1.ClientConn) ListenerServiceClient { - return &listenerServiceClient{cc} -} - -func (c *listenerServiceClient) ListenDeliverBlock(ctx context.Context, in *ListenDeliverBlockRequest, opts ...grpc.CallOption) (*ListenDeliverBlockResponse, error) { - out := new(ListenDeliverBlockResponse) - err := c.cc.Invoke(ctx, "/cosmos.streaming.v1.ListenerService/ListenDeliverBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *listenerServiceClient) ListenStateChanges(ctx context.Context, in *ListenStateChangesRequest, opts ...grpc.CallOption) (*ListenStateChangesResponse, error) { - out := new(ListenStateChangesResponse) - err := c.cc.Invoke(ctx, "/cosmos.streaming.v1.ListenerService/ListenStateChanges", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ListenerServiceServer is the server API for ListenerService service. -type ListenerServiceServer interface { - // ListenDeliverBlock is the corresponding endpoint for Listener.ListenDeliverBlock - ListenDeliverBlock(context.Context, *ListenDeliverBlockRequest) (*ListenDeliverBlockResponse, error) - // ListenStateChanges is the corresponding endpoint for Listener.ListenStateChanges - ListenStateChanges(context.Context, *ListenStateChangesRequest) (*ListenStateChangesResponse, error) -} - -// UnimplementedListenerServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedListenerServiceServer struct { -} - -func (*UnimplementedListenerServiceServer) ListenDeliverBlock(ctx context.Context, req *ListenDeliverBlockRequest) (*ListenDeliverBlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenDeliverBlock not implemented") -} -func (*UnimplementedListenerServiceServer) ListenStateChanges(ctx context.Context, req *ListenStateChangesRequest) (*ListenStateChangesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenStateChanges not implemented") -} - -func RegisterListenerServiceServer(s grpc1.Server, srv ListenerServiceServer) { - s.RegisterService(&_ListenerService_serviceDesc, srv) -} - -func _ListenerService_ListenDeliverBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenDeliverBlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ListenerServiceServer).ListenDeliverBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.streaming.v1.ListenerService/ListenDeliverBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ListenerServiceServer).ListenDeliverBlock(ctx, req.(*ListenDeliverBlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ListenerService_ListenStateChanges_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenStateChangesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ListenerServiceServer).ListenStateChanges(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.streaming.v1.ListenerService/ListenStateChanges", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ListenerServiceServer).ListenStateChanges(ctx, req.(*ListenStateChangesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ListenerService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cosmos.streaming.v1.ListenerService", - HandlerType: (*ListenerServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListenDeliverBlock", - Handler: _ListenerService_ListenDeliverBlock_Handler, - }, - { - MethodName: "ListenStateChanges", - Handler: _ListenerService_ListenStateChanges_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "cosmos/streaming/v1/grpc.proto", -} - -func (m *ListenDeliverBlockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenDeliverBlockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenDeliverBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TxResults) > 0 { - for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Txs) > 0 { - for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Txs[iNdEx]) - copy(dAtA[i:], m.Txs[iNdEx]) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Txs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.BlockHeight != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.BlockHeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ListenDeliverBlockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenDeliverBlockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenDeliverBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ListenStateChangesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenStateChangesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenStateChangesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x1a - } - if len(m.ChangeSet) > 0 { - for iNdEx := len(m.ChangeSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChangeSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.BlockHeight != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.BlockHeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ListenStateChangesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenStateChangesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenStateChangesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StoreKVPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Delete { - i-- - if m.Delete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGrpc(dAtA, 
i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Address) > 0 { - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Address))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EventAttribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecTxResult) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecTxResult) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecTxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x42 - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.GasUsed != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 - } - if m.GasWanted != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.GasWanted)) - i-- - dAtA[i] = 0x28 - } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 - } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGrpc(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.Code != 0 { - i = 
encodeVarintGrpc(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintGrpc(dAtA []byte, offset int, v uint64) int { - offset -= sovGrpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ListenDeliverBlockRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockHeight != 0 { - n += 1 + sovGrpc(uint64(m.BlockHeight)) - } - if len(m.Txs) > 0 { - for _, b := range m.Txs { - l = len(b) - n += 1 + l + sovGrpc(uint64(l)) - } - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - if len(m.TxResults) > 0 { - for _, e := range m.TxResults { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - return n -} - -func (m *ListenDeliverBlockResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ListenStateChangesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockHeight != 0 { - n += 1 + sovGrpc(uint64(m.BlockHeight)) - } - if len(m.ChangeSet) > 0 { - for _, e := range m.ChangeSet { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - return n -} - -func (m *ListenStateChangesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *StoreKVPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Address) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - if m.Delete { - n += 2 - } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - return n -} - -func (m *EventAttribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - return n -} - -func (m *ExecTxResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Code != 0 { - n += 1 + sovGrpc(uint64(m.Code)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - if m.GasWanted != 0 { - n += 1 + sovGrpc(uint64(m.GasWanted)) - } - if m.GasUsed != 0 { - n += 1 + sovGrpc(uint64(m.GasUsed)) - } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - l = len(m.Codespace) - if l > 0 { - n += 1 + l + sovGrpc(uint64(l)) - } - return n -} - -func sovGrpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGrpc(x uint64) (n int) { - return sovGrpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ListenDeliverBlockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenDeliverBlockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenDeliverBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) - } - m.BlockHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) - copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TxResults = append(m.TxResults, &ExecTxResult{}) - if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenDeliverBlockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenDeliverBlockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenDeliverBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenStateChangesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenStateChangesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenStateChangesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) - } - m.BlockHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChangeSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChangeSet = append(m.ChangeSet, &StoreKVPair{}) - if err := m.ChangeSet[len(m.ChangeSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.AppHash == nil { - m.AppHash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenStateChangesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenStateChangesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenStateChangesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StoreKVPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StoreKVPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StoreKVPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) - if m.Address == nil { - m.Address = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Delete = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, &EventAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventAttribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecTxResult) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecTxResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecTxResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) - } - m.GasWanted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) - } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &Event{}) - if err := 
m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Codespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGrpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGrpc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGrpc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGrpc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGrpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGrpc = fmt.Errorf("proto: unexpected end of group") -) diff --git a/server/v2/streaming/interface.go b/server/v2/streaming/interface.go deleted file mode 100644 index d7ee045343..0000000000 --- a/server/v2/streaming/interface.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package streaming provides shared data structures and interfaces for communication -// between the host application and plugins in a streaming context. -package streaming - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" -) - -// Listener defines the interface for a streaming service that hooks into -// the ABCI message processing of the BaseApp. Implementations should handle -// errors internally and return nil if they don't want to affect consensus. -type Listener interface { - // ListenDeliverBlock updates the streaming service with the latest Delivered Block messages. 
- ListenDeliverBlock(context.Context, ListenDeliverBlockRequest) error - - // ListenStateChanges updates the streaming service with the latest Commit messages and state changes. - ListenStateChanges(ctx context.Context, changeSet []*StoreKVPair) error -} - -// Handshake defines the handshake configuration shared by the streaming service and host. -// It serves as a UX feature to prevent execution of incompatible or unintended plugins. -var Handshake = plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "ABCI_LISTENER_PLUGIN", - MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345", -} - -var _ plugin.GRPCPlugin = (*ListenerGRPCPlugin)(nil) - -// ListenerGRPCPlugin is the implementation of plugin.GRPCPlugin, so we can serve/consume this. -type ListenerGRPCPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. - Impl Listener -} - -// GRPCServer registers the ListenerService server implementation. -func (p *ListenerGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - RegisterListenerServiceServer(s, &GRPCServer{Impl: p.Impl}) - return nil -} - -// GRPCClient creates a new ListenerService client. -func (p *ListenerGRPCPlugin) GRPCClient( - _ context.Context, - _ *plugin.GRPCBroker, - c *grpc.ClientConn, -) (interface{}, error) { - return &GRPCClient{client: NewListenerServiceClient(c)}, nil -} diff --git a/server/v2/streaming/plugin.md b/server/v2/streaming/plugin.md deleted file mode 100644 index 9eedff9ea2..0000000000 --- a/server/v2/streaming/plugin.md +++ /dev/null @@ -1,212 +0,0 @@ -# State Streaming Plugin (gRPC) - - - -The `Server/v2` package contains the interface for a [Listener](https://github.com/cosmos/cosmos-sdk/blob/main/baseapp/streaming.go) -service used to write state changes out from individual KVStores to external systems, -as described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-038-state-listening.md). - -Specific `ABCIListener` service implementations are written and loaded as [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) plugins. - -## Implementation - -In this section we describe the implementation of the `ABCIListener` interface as a gRPC service. - -### Service Protocol - -The companion service protocol for the `Listener` interface is described below. -See [proto/cosmos/store/streaming/abci/grpc.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/store/streaming/abci/grpc.proto) for full details. - -```protobuf reference -https://github.com/cosmos/cosmos-sdk/blob/6cee22df52eb0cbb30e351fbb41f66d26c1f8300/proto/cosmos/store/streaming/abci/grpc.proto#L1-L36 -``` - -### Generating the Code - -To generate the stubs that the local client implementation can call, run the following command: - -```shell -make proto-gen -``` - -For other languages you'll need to [download](https://github.com/cosmos/cosmos-sdk/blob/main/third_party/proto/README.md) -the Cosmos SDK protos into your project and compile them. For language-specific compilation instructions, visit -[https://github.com/grpc](https://github.com/grpc) and look in the `examples` folder of your -language of choice (`https://github.com/grpc/grpc-{language}/tree/master/examples`), or see [https://grpc.io](https://grpc.io) -for the documentation. - -### gRPC Client and Server - -Implementing the `ABCIListener` gRPC client and server is a simple and straightforward process.
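As a quick orientation before walking through the pieces below, here is a hedged host-side sketch of the full round trip: launching a plugin binary and dispensing its `Listener`. It assumes the `streaming` package shown above (`Handshake`, `ListenerGRPCPlugin`, `Listener`); the import path, the `"abci"` map key, and the binary path are illustrative rather than fixed API:

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/hashicorp/go-plugin"

	streaming "cosmossdk.io/server/v2/streaming" // assumed import path for the package above
)

// dispenseListener launches the plugin binary at pluginPath and returns the
// Listener it serves. The handshake and plugin map must match the values the
// plugin binary was built with. Real code should keep the client around and
// call client.Kill() on shutdown.
func dispenseListener(pluginPath string) (streaming.Listener, error) {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  streaming.Handshake,
		Plugins:          map[string]plugin.Plugin{"abci": &streaming.ListenerGRPCPlugin{}},
		Cmd:              exec.Command(pluginPath), // illustrative path to a compiled plugin binary
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolNetRPC, plugin.ProtocolGRPC},
	})

	// Connect over RPC; a handshake (magic cookie) mismatch fails here,
	// before any Listener method is ever called.
	rpcClient, err := client.Client()
	if err != nil {
		return nil, err
	}

	raw, err := rpcClient.Dispense("abci")
	if err != nil {
		return nil, err
	}

	listener, ok := raw.(streaming.Listener)
	if !ok {
		return nil, fmt.Errorf("dispensed plugin does not implement streaming.Listener")
	}
	return listener, nil
}
```

Note that the magic-cookie handshake is only a UX guard against executing an incompatible or unintended binary, not a security mechanism.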
- -To create the client and server we create a `ListenerGRPCPlugin` struct that implements the -`plugin.GRPCPlugin` interface and an `Impl` property that will contain a concrete implementation -of the `ABCIListener` plugin written in Go. - -#### The Interface - -The `Server/v2` `Listener` interface defines the plugin's capabilities. - -A boilerplate RPC implementation example of the `Listener` interface. ([store/streaming/abci/grpc.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/abci/grpc.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/grpc.go#L13-L79 -``` - -Our `listener` service plugin. ([store/streaming/plugins/abci/v1/interface.go](interface.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/interface.go#L13-L45 -``` - -#### Plugin Implementation - -Plugin implementations can live in a completely separate package, but they need access -to the `Listener` interface. Note that implementations assigned to the -`ListenerGRPCPlugin.Impl` property are **only** required when building -plugins in Go: they are pre-compiled into Go modules, and `GRPCServer.Impl` calls methods -on this out-of-process plugin. - -For Go plugins this is all that is required to process data sent over gRPC. -This makes it possible to write quick plugins that stream data out to different -external systems (e.g. a database, a file, or Kafka) without having to implement -the gRPC server endpoints. - -```go -// MyPlugin is an implementation of the Listener interface. -// For Go plugins this is all that is required to process data sent over gRPC. -type MyPlugin struct { - ... -} - -func (a MyPlugin) ListenDeliverBlock(ctx context.Context, req ListenDeliverBlockRequest) error { - // process data - return nil -} - -func (a MyPlugin) ListenStateChanges(ctx context.Context, changeSet []*StoreKVPair) error { - // process data - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &ListenerGRPCPlugin{Impl: &MyPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this plugin. - GRPCServer: plugin.DefaultGRPCServer, - }) -} -``` - -## Plugin Loading System - -The SDK provides a general-purpose plugin loading system that can load not just -the `ABCIListener` service plugin but other protocol services as well. You can take a look -at how plugins are loaded by the SDK in [store/streaming/streaming.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/streaming.go). - -You'll need to add the following to your `app.go`: - -```go -// app.go - -func NewApp(...) *App { - - ...
- - // register streaming services - streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey)) - for service := range streamingCfg { - pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingABCIPluginTomlKey) - pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey))) - if len(pluginName) > 0 { - logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel)) - plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel) - if err != nil { - tmos.Exit(err.Error()) - } - if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil { - tmos.Exit(err.Error()) - } - } - } - - ... -} -``` - -## Configuration - -Update the streaming section in `app.toml`: - -```toml -# Streaming allows nodes to stream state to external systems -[streaming] - -# streaming.abci specifies the configuration for the ABCI Listener streaming service -[streaming.abci] - -# List of kv store keys to stream out via gRPC -# Set to ["*"] to expose all keys. -keys = ["*"] - -# The plugin name used for streaming via gRPC -# Supported plugins: abci -plugin = "abci" - -# stop-node-on-err specifies whether to stop the node when the ABCI listener returns an error -stop-node-on-err = true -``` - -## Updating the protocol - -If you update the protocol buffers file, you can regenerate the file and plugins using the -following commands from the project root directory. You do not need to run this if you're -just trying the examples; you can skip ahead to the [Testing](#testing) section. - -```shell -make proto-gen -``` - -* stdout plugin; from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/stdout/stdout streaming/abci/examples/stdout/stdout.go -``` - -* file plugin (writes to `~/`); from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go -``` - -### Testing - -Export a plugin from one of the Go or Python examples. - -* stdout plugin - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/stdout/stdout" -``` - -* file plugin (writes to ~/) - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/file/file" -``` - -where `{path to}` is the parent path to the `cosmos-sdk` repo on your system. - -Test: - -```shell -make test-sim-nondeterminism-streaming -``` - -The plugin system will look for the plugin binary in the `COSMOS_SDK_{PLUGIN_NAME}` environment variable shown above -and will error out if it does not find it. `{PLUGIN_NAME}` is the UPPERCASE name from the -`streaming.abci.plugin` TOML configuration setting.
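To close the loop, a minimal hedged smoke test of a dispensed `Listener` might look like the fragment below, continuing from the `dispenseListener` sketch earlier; the request values are illustrative and the types come from the `streaming` package above:

```go
// Feed one synthetic block and one state-change batch through the plugin,
// mirroring what the node does for each block. Any plugin-side error
// surfaces here as a plain Go error.
ctx := context.Background()

req := streaming.ListenDeliverBlockRequest{
	BlockHeight: 1,
	Txs:         [][]byte{{0x01, 0x02, 0x03}}, // illustrative raw tx bytes
	Events:      []*streaming.Event{},
}
if err := listener.ListenDeliverBlock(ctx, req); err != nil {
	log.Fatalf("ListenDeliverBlock: %v", err)
}

changeSet := []*streaming.StoreKVPair{
	{Key: []byte("mockStore/key"), Value: []byte("value")},
}
if err := listener.ListenStateChanges(ctx, changeSet); err != nil {
	log.Fatalf("ListenStateChanges: %v", err)
}
```

Whether such an error halts a running node is governed by the `stop-node-on-err` setting shown above.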
diff --git a/server/v2/streaming/streaming.go b/server/v2/streaming/streaming.go deleted file mode 100644 index f788c71f54..0000000000 --- a/server/v2/streaming/streaming.go +++ /dev/null @@ -1,80 +0,0 @@ -package streaming - -import ( - "fmt" - "os" - "os/exec" - "strings" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" -) - -const ( - pluginEnvKeyPrefix = "COSMOS_SDK" - defaultPlugin = "grpc" -) - -// HandshakeMap contains a map of each supported streaming's handshake config -var HandshakeMap = map[string]plugin.HandshakeConfig{ - defaultPlugin: Handshake, -} - -// PluginMap contains a map of supported gRPC plugins -var PluginMap = map[string]plugin.Plugin{ - defaultPlugin: &ListenerGRPCPlugin{}, -} - -func GetPluginEnvKey(name string) string { - return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name)) -} - -func NewStreamingPlugin(name, logLevel string) (interface{}, error) { - logger := hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: toHclogLevel(logLevel), - Name: fmt.Sprintf("plugin.%s", name), - }) - - // We're a host. Start by launching the streaming process. - env := os.Getenv(GetPluginEnvKey(name)) - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: HandshakeMap[name], - Managed: true, - Plugins: PluginMap, - // For verifying the integrity of executables see SecureConfig documentation - // https://pkg.go.dev/github.com/hashicorp/go-plugin#SecureConfig - //#nosec G204 -- Required to load plugins - Cmd: exec.Command("sh", "-c", env), - Logger: logger, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, plugin.ProtocolGRPC, - }, - }) - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - // Request streaming plugin - return rpcClient.Dispense(name) -} - -func toHclogLevel(s string) hclog.Level { - switch s { - case "trace": - return hclog.Trace - case "debug": - return hclog.Debug - case "info": - return hclog.Info - case "warn": - return hclog.Warn - case "error": - return hclog.Error - default: - return hclog.DefaultLevel - } -} diff --git a/server/v2/streaming/streaming_test.go b/server/v2/streaming/streaming_test.go deleted file mode 100644 index 0a590c9511..0000000000 --- a/server/v2/streaming/streaming_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "os" - "runtime" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "cosmossdk.io/core/log" - coretesting "cosmossdk.io/core/testing" -) - -type PluginTestSuite struct { - suite.Suite - - loggerCtx MockContext - - workDir string - - deliverBlockrequest ListenDeliverBlockRequest - stateChangeRequest ListenStateChangesRequest - - changeSet []*StoreKVPair -} - -func (s *PluginTestSuite) SetupTest() { - if runtime.GOOS != "linux" { - s.T().Skip("only run on linux") - } - - path, err := os.Getwd() - if err != nil { - s.T().Fail() - } - s.workDir = path - - pluginVersion := defaultPlugin - // to write data to files, replace stdout/stdout => file/file - pluginPath := fmt.Sprintf("%s/abci/examples/stdout/stdout", s.workDir) - if err := os.Setenv(GetPluginEnvKey(pluginVersion), pluginPath); err != nil { - s.T().Fail() - } - - raw, err := NewStreamingPlugin(pluginVersion, "trace") - require.NoError(s.T(), err, "load", "streaming", "unexpected error") - - abciListener, ok := raw.(Listener) - require.True(s.T(), ok, "should pass type check") - - logger := 
coretesting.NewNopLogger() - streamingService := Manager{ - Listeners: []Listener{abciListener}, - StopNodeOnErr: true, - } - s.loggerCtx = NewMockContext(1, logger, streamingService) - - // test abci message types - - s.deliverBlockrequest = ListenDeliverBlockRequest{ - BlockHeight: s.loggerCtx.BlockHeight(), - Txs: [][]byte{{1, 2, 3, 4, 5, 6, 7, 8, 9}}, - Events: []*Event{}, - } - s.stateChangeRequest = ListenStateChangesRequest{} - - key := []byte("mockStore") - key = append(key, 1, 2, 3) - // test store kv pair types - for range [2000]int{} { - s.changeSet = append(s.changeSet, &StoreKVPair{ - Key: key, - Value: []byte{3, 2, 1}, - }) - } -} - -func TestPluginTestSuite(t *testing.T) { - suite.Run(t, new(PluginTestSuite)) -} - -func (s *PluginTestSuite) TestABCIGRPCPlugin() { - s.T().Run("Should successfully load streaming", func(t *testing.T) { - abciListeners := s.loggerCtx.StreamingManager().Listeners - for _, abciListener := range abciListeners { - for i := range [50]int{} { - - err := abciListener.ListenDeliverBlock(s.loggerCtx, s.deliverBlockrequest) - assert.NoError(t, err, "ListenEndBlock") - - err = abciListener.ListenStateChanges(s.loggerCtx, s.changeSet) - assert.NoError(t, err, "ListenCommit") - - s.updateHeight(int64(i + 1)) - } - } - }) -} - -func (s *PluginTestSuite) updateHeight(n int64) { - s.loggerCtx = NewMockContext(n, s.loggerCtx.Logger(), s.loggerCtx.StreamingManager()) -} - -var ( - _ context.Context = MockContext{} - _ Context = MockContext{} -) - -type MockContext struct { - baseCtx context.Context - height int64 - logger log.Logger - streamingManager Manager -} - -func (m MockContext) BlockHeight() int64 { return m.height } -func (m MockContext) Logger() log.Logger { return m.logger } -func (m MockContext) StreamingManager() Manager { return m.streamingManager } - -func NewMockContext(height int64, logger log.Logger, sm Manager) MockContext { - return MockContext{ - baseCtx: context.Background(), - height: height, - logger: logger, - streamingManager: sm, - } -} - -func (m MockContext) Deadline() (deadline time.Time, ok bool) { - return m.baseCtx.Deadline() -} - -func (m MockContext) Done() <-chan struct{} { - return m.baseCtx.Done() -} - -func (m MockContext) Err() error { - return m.baseCtx.Err() -} - -func (m MockContext) Value(key any) any { - return m.baseCtx.Value(key) -} diff --git a/server/v2/streaming/types.go b/server/v2/streaming/types.go deleted file mode 100644 index 67c589ab77..0000000000 --- a/server/v2/streaming/types.go +++ /dev/null @@ -1,11 +0,0 @@ -package streaming - -// Manager is the struct that maintains a list of ABCIListeners and configuration settings. -type Manager struct { - // Listeners for hooking into the message processing of the server - // and exposing the requests and responses to external consumers - Listeners []Listener - - // StopNodeOnErr halts the node when ABCI streaming service listening results in an error. 
- StopNodeOnErr bool -} diff --git a/server/v2/streaming/utils.go b/server/v2/streaming/utils.go deleted file mode 100644 index 658f2e978d..0000000000 --- a/server/v2/streaming/utils.go +++ /dev/null @@ -1,23 +0,0 @@ -package streaming - -import "cosmossdk.io/core/event" - -func IntoStreamingEvents(events []event.Event) []*Event { - // Allocate capacity only: the slice is filled with append below, so a - // non-zero length here would leave len(events) leading nil entries. - streamingEvents := make([]*Event, 0, len(events)) - - for _, event := range events { - strEvent := &Event{ - Type: event.Type, - } - - for _, eventValue := range event.Attributes { - strEvent.Attributes = append(strEvent.Attributes, &EventAttribute{ - Key: eventValue.Key, - Value: eventValue.Value, - }) - } - streamingEvents = append(streamingEvents, strEvent) - } - - return streamingEvents -} diff --git a/server/v2/testdata/app.toml b/server/v2/testdata/app.toml deleted file mode 100644 index 20482ac442..0000000000 --- a/server/v2/testdata/app.toml +++ /dev/null @@ -1,17 +0,0 @@ -[grpc] -# Enable defines if the gRPC server should be enabled. -enable = false -# Address defines the gRPC server address to bind to. -address = 'localhost:9090' -# MaxRecvMsgSize defines the max message size in bytes the server can receive. -# The default value is 10MB. -max-recv-msg-size = 10485760 -# MaxSendMsgSize defines the max message size in bytes the server can send. -# The default value is math.MaxInt32. -max-send-msg-size = 2147483647 - -[mock-server-1] -# Mock field -mock_field = 'default' -# Mock field two -mock_field_two = 1 diff --git a/server/v2/testdata/config.toml b/server/v2/testdata/config.toml deleted file mode 100644 index d42fa7f98d..0000000000 --- a/server/v2/testdata/config.toml +++ /dev/null @@ -1,482 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable -# or --home cmd flag. - -# The version of the CometBFT binary that created or -# last modified the config file. Do not modify this.
-version = "0.38.7" - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the CometBFT binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "aurn-node" - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for CometBFT to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://127.0.0.1:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. 
-# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behavior. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to CometBFT's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial. If empty, will use the same -# port as the laddr, and will introspect on the listener to figure out the -# address. IP and port are required. Example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -# The type of mempool for this node to use. -# -# Possible types: -# - "flood" : concurrent linked list mempool with flooding gossip protocol -# (default) -# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible -# for storing, disseminating and proposing txs). "create_empty_blocks=false" is -# not supported. 
-type = "flood" - -# Recheck (default: true) defines whether CometBFT should recheck the -# validity for all remaining transaction in the mempool after a block. -# Since a block affects the application state, some transactions in the -# mempool may become invalid. If this does not apply to your application, -# you can disable rechecking. -recheck = true - -# Broadcast (default: true) defines whether the mempool should relay -# transactions to other peers. Setting this to false will stop the mempool -# from relaying transactions to other peers until they are included in a -# block. In other words, if Broadcast is disabled, only the peer you send -# the tx to will see it until it is included in a block. -broadcast = true - -# WalPath (default: "") configures the location of the Write Ahead Log -# (WAL) for the mempool. The WAL is disabled by default. To enable, set -# WalPath to where you want the WAL to be written (e.g. -# "data/mempool.wal"). -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -# Experimental parameters to limit gossiping txs to up to the specified number of peers. -# We use two independent upper values for persistent and non-persistent peers. -# Unconditional peers are not affected by this feature. -# If we are connected to more than the specified number of persistent peers, only send txs to -# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those -# persistent peers disconnects, activate another persistent peer. -# Similarly for non-persistent peers, with an upper limit of -# ExperimentalMaxGossipConnectionsToNonPersistentPeers. -# If set to 0, the feature is disabled for the corresponding group of peers, that is, the -# number of active connections to that group of peers is not bounded. -# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental -# performance results using the default P2P configuration. -experimental_max_gossip_connections_to_persistent_peers = 0 -experimental_max_gossip_connections_to_non_persistent_peers = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). 
The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Block Sync Configuration Options ### -####################################################### -[blocksync] - -# Block Sync version to use: -# -# In v0.37, v1 and v2 of the block sync protocols were deprecated. -# Please use v0 instead. -# -# 1) "v0" - the default block sync implementation -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "3s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Storage Configuration Options ### -####################################################### -[storage] - -# Set to true to discard ABCI responses from the state store, which can save a -# considerable amount of disk space. Set to false to ensure ABCI responses are -# persisted. ABCI responses are required for /block_results RPC queries, and to -# reindex events in the command-line tool. -discard_abci_responses = false - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -# 3) "psql" - the indexer services backed by PostgreSQL. -# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -# The PostgreSQL connection configuration, the connection format: -# postgresql://:@:/? -psql-conn = "" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "cometbft" diff --git a/server/v2/types.go b/server/v2/types.go deleted file mode 100644 index 978b46b788..0000000000 --- a/server/v2/types.go +++ /dev/null @@ -1,22 +0,0 @@ -package serverv2 - -import ( - gogoproto "github.com/cosmos/gogoproto/proto" - "github.com/spf13/viper" - - coreapp "cosmossdk.io/core/app" - "cosmossdk.io/core/transaction" - "cosmossdk.io/log" - "cosmossdk.io/server/v2/appmanager" -) - -type AppCreator[T transaction.Tx] func(log.Logger, *viper.Viper) AppI[T] - -type AppI[T transaction.Tx] interface { - Name() string - InterfaceRegistry() coreapp.InterfaceRegistry - GetAppManager() *appmanager.AppManager[T] - GetConsensusAuthority() string - GetGPRCMethodsToMessageMap() map[string]func() gogoproto.Message - GetStore() any -} diff --git a/server/v2/util.go b/server/v2/util.go deleted file mode 100644 index 72335621b4..0000000000 --- a/server/v2/util.go +++ /dev/null @@ -1,106 +0,0 @@ -package serverv2 - -import ( - "context" - "errors" - "fmt" - "net" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - corectx "cosmossdk.io/core/context" - "cosmossdk.io/log" -) - -// SetCmdServerContext sets a command's Context value to the provided argument. -// If the context has not been set, set the given context as the default. -func SetCmdServerContext(cmd *cobra.Command, viper *viper.Viper, logger log.Logger) error { - var cmdCtx context.Context - if cmd.Context() == nil { - cmdCtx = context.Background() - } else { - cmdCtx = cmd.Context() - } - - cmdCtx = context.WithValue(cmdCtx, corectx.LoggerContextKey, logger) - cmdCtx = context.WithValue(cmdCtx, corectx.ViperContextKey, viper) - cmd.SetContext(cmdCtx) - - return nil -} - -func GetViperFromCmd(cmd *cobra.Command) *viper.Viper { - value := cmd.Context().Value(corectx.ViperContextKey) - v, ok := value.(*viper.Viper) - if !ok { - panic(fmt.Sprintf("incorrect viper type %T: expected *viper.Viper. Have you forgot to set the viper in the command context?", value)) - } - return v -} - -func GetLoggerFromCmd(cmd *cobra.Command) log.Logger { - v := cmd.Context().Value(corectx.LoggerContextKey) - logger, ok := v.(log.Logger) - if !ok { - panic(fmt.Sprintf("incorrect logger type %T: expected log.Logger. 
Have you forgot to set the logger in the command context?", v)) - } - - return logger -} - -// ExternalIP https://stackoverflow.com/questions/23558425/how-do-i-get-the-local-ip-address-in-go -// TODO there must be a better way to get external IP -func ExternalIP() (string, error) { - ifaces, err := net.Interfaces() - if err != nil { - return "", err - } - - for _, iface := range ifaces { - if skipInterface(iface) { - continue - } - addrs, err := iface.Addrs() - if err != nil { - return "", err - } - - for _, addr := range addrs { - ip := addrToIP(addr) - if ip == nil || ip.IsLoopback() { - continue - } - ip = ip.To4() - if ip == nil { - continue // not an ipv4 address - } - return ip.String(), nil - } - } - return "", errors.New("are you connected to the network?") -} - -func skipInterface(iface net.Interface) bool { - if iface.Flags&net.FlagUp == 0 { - return true // interface down - } - - if iface.Flags&net.FlagLoopback != 0 { - return true // loopback interface - } - - return false -} - -func addrToIP(addr net.Addr) net.IP { - var ip net.IP - - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - return ip -} diff --git a/simapp/go.mod b/simapp/go.mod index 8acf7741bd..b030fcf6df 100644 --- a/simapp/go.mod +++ b/simapp/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/depinject v1.0.0 cosmossdk.io/log v1.3.1 cosmossdk.io/math v1.3.0 - cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc + cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // main cosmossdk.io/tools/confix v0.0.0-20230613133644-0a778132a60f cosmossdk.io/x/accounts v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/accounts/defaults/lockup v0.0.0-20240417181816-5e7aae0db1f5 @@ -37,7 +37,7 @@ require ( github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-db v1.0.2 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/spf13/cast v1.6.0 @@ -244,7 +244,7 @@ replace ( cosmossdk.io/collections => ../collections cosmossdk.io/core => ../core cosmossdk.io/core/testing => ../core/testing - cosmossdk.io/store => ../store + cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 // main cosmossdk.io/tools/confix => ../tools/confix cosmossdk.io/x/accounts => ../x/accounts cosmossdk.io/x/accounts/defaults/lockup => ../x/accounts/defaults/lockup diff --git a/simapp/go.sum b/simapp/go.sum index 53269104f8..35f835d895 100644 --- a/simapp/go.sum +++ b/simapp/go.sum @@ -202,6 +202,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 h1:lhyOHcIJU+IB6i5sO36DWC2r4QXDEk/bsno7jrTr28k= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6/go.mod h1:CY8wAToETz/dmuuKwf/qfXEImtey4jWdWWcoavfQWNw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= diff --git a/simapp/v2/app_di.go b/simapp/v2/app_di.go index 
47e78730e3..7cde4042f3 100644 --- a/simapp/v2/app_di.go +++ b/simapp/v2/app_di.go @@ -2,7 +2,6 @@ package simapp import ( _ "embed" - "path/filepath" "github.com/spf13/viper" @@ -14,10 +13,6 @@ import ( "cosmossdk.io/log" "cosmossdk.io/runtime/v2" serverv2 "cosmossdk.io/server/v2" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment/iavl" - "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/root" "cosmossdk.io/x/accounts" authkeeper "cosmossdk.io/x/auth/keeper" authzkeeper "cosmossdk.io/x/authz/keeper" @@ -41,6 +36,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/std" + _ "github.com/cosmos/cosmos-sdk/x/genutil" ) // DefaultNodeHome default home directories for the application daemon @@ -97,10 +93,6 @@ func NewSimApp[T transaction.Tx]( viper *viper.Viper, ) *SimApp[T] { viper.Set(serverv2.FlagHome, DefaultNodeHome) // TODO possibly set earlier when viper is created - scRawDb, err := db.NewGoLevelDB("application", filepath.Join(DefaultNodeHome, "data"), nil) - if err != nil { - panic(err) - } var ( app = &SimApp[T]{} appBuilder *runtime.AppBuilder[T] @@ -110,21 +102,6 @@ func NewSimApp[T transaction.Tx]( AppConfig(), depinject.Supply( logger, - &root.FactoryOptions{ - Logger: logger, - RootDir: DefaultNodeHome, - SSType: 0, - SCType: 0, - SCPruningOption: &store.PruningOption{ - KeepRecent: 0, - Interval: 0, - }, - IavlConfig: &iavl.Config{ - CacheSize: 100_000, - SkipFastStorageUpgrade: true, - }, - SCRawDB: scRawDb, - }, viper, // ADVANCED CONFIGURATION @@ -206,6 +183,7 @@ func NewSimApp[T transaction.Tx]( panic(err) } + var err error app.App, err = appBuilder.Build() if err != nil { panic(err) diff --git a/simapp/v2/go.mod b/simapp/v2/go.mod index 9aab746633..e1da756847 100644 --- a/simapp/v2/go.mod +++ b/simapp/v2/go.mod @@ -10,9 +10,9 @@ require ( cosmossdk.io/log v1.3.1 cosmossdk.io/math v1.3.0 cosmossdk.io/runtime/v2 v2.0.0-00010101000000-000000000000 - cosmossdk.io/server/v2 v2.0.0-00010101000000-000000000000 + cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6 // main cosmossdk.io/server/v2/cometbft v0.0.0-00010101000000-000000000000 - cosmossdk.io/store/v2 v2.0.0 + cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 // indirect; main cosmossdk.io/tools/confix v0.0.0-00010101000000-000000000000 cosmossdk.io/x/accounts v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 @@ -34,7 +34,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc1 github.com/cosmos/cosmos-db v1.0.2 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.53.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 @@ -42,8 +42,6 @@ require ( google.golang.org/protobuf v1.34.2 ) -require cosmossdk.io/core/testing v0.0.0-20240726110027-5c90246b3f9f // indirect - require ( buf.build/gen/go/cometbft/cometbft/protocolbuffers/go v1.34.2-20240701160653-fedbb9acfd2f.2 // indirect buf.build/gen/go/cosmos/gogo-proto/protocolbuffers/go v1.34.2-20240130113600-88ef6483f90f.2 // indirect @@ -54,11 +52,12 @@ require ( cloud.google.com/go/iam v1.1.8 // indirect cloud.google.com/go/storage v1.42.0 // indirect cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core/testing v0.0.0-20240726110027-5c90246b3f9f // indirect cosmossdk.io/errors v1.0.1 // indirect cosmossdk.io/schema v0.1.1 // indirect - cosmossdk.io/server/v2/appmanager 
v0.0.0-20240726110027-5c90246b3f9f // indirect - cosmossdk.io/server/v2/stf v0.0.0-20240726110027-5c90246b3f9f // indirect - cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // indirect + cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 // indirect; main + cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6 // indirect; main + cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // indirect; main cosmossdk.io/x/accounts/defaults/lockup v0.0.0-20240417181816-5e7aae0db1f5 // indirect cosmossdk.io/x/accounts/defaults/multisig v0.0.0-00010101000000-000000000000 // indirect cosmossdk.io/x/epochs v0.0.0-20240522060652-a1ae4c3e0337 // indirect @@ -240,6 +239,8 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) +require cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 // indirect + // Here are the short-lived replace from the SimApp // Replace here are pending PRs, or version to be tagged // replace ( @@ -293,10 +294,7 @@ replace ( cosmossdk.io/api => ../../api cosmossdk.io/core/testing => ../../core/testing cosmossdk.io/runtime/v2 => ../../runtime/v2 - cosmossdk.io/server/v2 => ../../server/v2 - cosmossdk.io/server/v2/appmanager => ../../server/v2/appmanager cosmossdk.io/server/v2/cometbft => ../../server/v2/cometbft - cosmossdk.io/server/v2/stf => ../../server/v2/stf - cosmossdk.io/store => ../../store - cosmossdk.io/store/v2 => ../../store/v2 + cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 // main + ) diff --git a/simapp/v2/go.sum b/simapp/v2/go.sum index 807bcde09e..52b7536525 100644 --- a/simapp/v2/go.sum +++ b/simapp/v2/go.sum @@ -196,12 +196,24 @@ cosmossdk.io/depinject v1.0.0 h1:dQaTu6+O6askNXO06+jyeUAnF2/ssKwrrszP9t5q050= cosmossdk.io/depinject v1.0.0/go.mod h1:zxK/h3HgHoA/eJVtiSsoaRaRA2D5U4cJ5thIG4ssbB8= cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5 h1:IQNdY2kB+k+1OM2DvqFG1+UgeU1JzZrWtwuWzI3ZfwA= +cosmossdk.io/errors/v2 v2.0.0-20240731132947-df72853b3ca5/go.mod h1:0CuYKkFHxc1vw2JC+t21THBCALJVROrWVR/3PQ1urpc= cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6 h1:r2BXi/s99Mq1ShLmP4QTlcUbMvVPKTMQztSbevu6Xeo= +cosmossdk.io/server/v2 v2.0.0-20240731205446-aee9803a0af6/go.mod h1:alRmtz2gedZe+goFHbNjkBPNTkShFW6HEeXiyT7hdHM= +cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6 h1:vrHmVjfEjEwQh90dim272gYq7OFILg4Yrv3XzreMpe4= +cosmossdk.io/server/v2/appmanager v0.0.0-20240731205446-aee9803a0af6/go.mod h1:Xm5IOSjw45Sew7fiVckaTCIU5oQPs20V+54NOqR3H4o= +cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6 h1:F8yfqCf1cAwuZZnIxinmzr/2nmLjhK9K/BJfBjW3nJ0= +cosmossdk.io/server/v2/stf v0.0.0-20240731205446-aee9803a0af6/go.mod h1:IUbZp79IZ4NCR2eNXA0utcQOS8lz34BvsAWTeCGwGAM= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 h1:lhyOHcIJU+IB6i5sO36DWC2r4QXDEk/bsno7jrTr28k= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6/go.mod 
h1:CY8wAToETz/dmuuKwf/qfXEImtey4jWdWWcoavfQWNw= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6 h1:/ffIfMKzoCVUI38t5Vq3BNW9U8exRMxK5QgS/ujn0lA= +cosmossdk.io/store/v2 v2.0.0-20240731205446-aee9803a0af6/go.mod h1:aG3brMLcldPsdhfkdCaisGDIe+tXTNWdUDt5JYsRDl8= cosmossdk.io/x/epochs v0.0.0-20240522060652-a1ae4c3e0337 h1:GuBrfHsK3RD5vlD4DuBz3DXslR6VlnzrYmHOC3L679Q= cosmossdk.io/x/epochs v0.0.0-20240522060652-a1ae4c3e0337/go.mod h1:PhLn1pMBilyRC4GfRkoYhm+XVAYhF4adVrzut8AdpJI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -697,8 +709,8 @@ github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= +github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= diff --git a/simapp/v2/simdv2/cmd/commands.go b/simapp/v2/simdv2/cmd/commands.go index a42c1a9a48..8d1e8bd40a 100644 --- a/simapp/v2/simdv2/cmd/commands.go +++ b/simapp/v2/simdv2/cmd/commands.go @@ -16,6 +16,7 @@ import ( serverv2 "cosmossdk.io/server/v2" "cosmossdk.io/server/v2/api/grpc" "cosmossdk.io/server/v2/cometbft" + "cosmossdk.io/server/v2/store" "cosmossdk.io/simapp/v2" confixcmd "cosmossdk.io/tools/confix/cmd" authcmd "cosmossdk.io/x/auth/client/cli" @@ -76,6 +77,7 @@ func initRootCmd[T transaction.Tx]( logger, cometbft.New(&genericTxDecoder[T]{txConfig}, cometbft.DefaultServerOptions[T]()), grpc.New[T](), + store.New[T](), ); err != nil { panic(err) } diff --git a/simapp/v2/simdv2/cmd/testnet.go b/simapp/v2/simdv2/cmd/testnet.go index 4b9986a075..2d4c3b348d 100644 --- a/simapp/v2/simdv2/cmd/testnet.go +++ b/simapp/v2/simdv2/cmd/testnet.go @@ -22,6 +22,7 @@ import ( serverv2 "cosmossdk.io/server/v2" "cosmossdk.io/server/v2/api/grpc" "cosmossdk.io/server/v2/cometbft" + "cosmossdk.io/server/v2/store" authtypes "cosmossdk.io/x/auth/types" banktypes "cosmossdk.io/x/bank/types" stakingtypes "cosmossdk.io/x/staking/types" @@ -343,7 +344,8 @@ func initTestnetFiles[T transaction.Tx]( cometbft.OverwriteDefaultConfigTomlConfig(nodeConfig), ) grpcServer := grpc.New[T](grpc.OverwriteDefaultConfig(grpcConfig)) - server := serverv2.NewServer(log.NewNopLogger(), cometServer, grpcServer) + storeServer := store.New[T]() + server := serverv2.NewServer(log.NewNopLogger(), cometServer, grpcServer, storeServer) err = server.WriteConfig(filepath.Join(nodeDir, "config")) if err != nil { return err diff --git a/store/CHANGELOG.md b/store/CHANGELOG.md deleted file mode 100644 index 6cf4115057..0000000000 --- a/store/CHANGELOG.md +++ /dev/null @@ -1,73 +0,0 @@ - - -# Changelog - -## [Unreleased] - -### Bug Fixes - -* (store) [#20425](https://github.com/cosmos/cosmos-sdk/pull/20425) Fix nil pointer panic when query historical state where a new store don't 
exist. -* (store) [#20644](https://github.com/cosmos/cosmos-sdk/pull/20644) Avoid returning a nil error when the payload stream is not exhausted. - -## v1.1.0 (March 20, 2024) - -### Improvements - -* [#19770](https://github.com/cosmos/cosmos-sdk/pull/19770) Upgrade IAVL to v1.1.1. - -## v1.0.2 (January 10, 2024) - -### Bug Fixes - -* [#18897](https://github.com/cosmos/cosmos-sdk/pull/18897) Replace panic in pruning to avoid consensus halting. - -## v1.0.1 (November 28, 2023) - -### Bug Fixes - -* [#18563](https://github.com/cosmos/cosmos-sdk/pull/18563) `LastCommitID().Hash` will always return `sha256([]byte{})` if the store is empty. - -## v1.0.0 (October 31, 2023) - -### Features - -* [#17294](https://github.com/cosmos/cosmos-sdk/pull/17294) Add snapshot manager Close method. -* [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) Migrate the `iavl` tree to the new key format. - * Remove `DeleteVersion`, `DeleteVersions`, `LazyLoadVersionForOverwriting` from the `iavl` tree API. - * Add `DeleteVersionsTo` and `SaveChangeSet`, since versions are kept sequentially from `fromVersion` to `toVersion`. - * Refactor the pruning manager to use `DeleteVersionsTo`. -* [#15712](https://github.com/cosmos/cosmos-sdk/pull/15712) Add `WorkingHash` function to the store interface to get the current app hash before commit. -* [#14645](https://github.com/cosmos/cosmos-sdk/pull/14645) Add a limit to the length of keys and values. -* [#15683](https://github.com/cosmos/cosmos-sdk/pull/15683) `rootmulti.Store.CacheMultiStoreWithVersion` can now handle loading archival states that don't persist any of the module stores the current state has. -* [#16060](https://github.com/cosmos/cosmos-sdk/pull/16060) Support saving and restoring snapshots locally. -* [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) The `store` module is extracted to have a separate go.mod file, which allows it to be a standalone module. -* [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` has validation to check that all the module stores' heights are correct; it will error if any module store has an incorrect height. - -### Improvements - -* [#17158](https://github.com/cosmos/cosmos-sdk/pull/17158) Start the snapshot goroutine only when a snapshot needs to be created.
- -### API Breaking Changes - -* [#16321](https://github.com/cosmos/cosmos-sdk/pull/16321) QueryInterface defines its own request and response types instead of relying on comet/abci & returns an error diff --git a/store/cache/benchmark_test.go b/store/cache/benchmark_test.go deleted file mode 100644 index 76f875a0d1..0000000000 --- a/store/cache/benchmark_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package cache - -import ( - "testing" - - "cosmossdk.io/store/types" -) - -func freshMgr() *CommitKVStoreCacheManager { - return &CommitKVStoreCacheManager{ - caches: map[string]types.CommitKVStore{ - "a1": nil, - "alalalalalal": nil, - }, - } -} - -func populate(mgr *CommitKVStoreCacheManager) { - mgr.caches["this one"] = (types.CommitKVStore)(nil) - mgr.caches["those ones are the ones"] = (types.CommitKVStore)(nil) - mgr.caches["very huge key right here and there are we going to ones are the ones"] = (types.CommitKVStore)(nil) -} - -func BenchmarkReset(b *testing.B) { - b.ReportAllocs() - mgr := freshMgr() - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - mgr.Reset() - if len(mgr.caches) != 0 { - b.Fatal("Reset failed") - } - populate(mgr) - if len(mgr.caches) == 0 { - b.Fatal("populate failed") - } - mgr.Reset() - if len(mgr.caches) != 0 { - b.Fatal("Reset failed") - } - } - - if mgr == nil { - b.Fatal("Impossible condition") - } -} diff --git a/store/cache/cache.go b/store/cache/cache.go deleted file mode 100644 index 748eae8c42..0000000000 --- a/store/cache/cache.go +++ /dev/null @@ -1,132 +0,0 @@ -package cache - -import ( - "fmt" - - lru "github.com/hashicorp/golang-lru" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/types" -) - -var ( - _ types.CommitKVStore = (*CommitKVStoreCache)(nil) - _ types.MultiStorePersistentCache = (*CommitKVStoreCacheManager)(nil) - - // DefaultCommitKVStoreCacheSize defines the persistent ARC cache size for a - // CommitKVStoreCache. - DefaultCommitKVStoreCacheSize uint = 1000 -) - -type ( - // CommitKVStoreCache implements an inter-block (persistent) cache that wraps a - // CommitKVStore. Reads first hit the internal ARC (Adaptive Replacement Cache). - // During a cache miss, the read is delegated to the underlying CommitKVStore - // and cached. Deletes and writes always happen to both the cache and the - // CommitKVStore in a write-through manner. Caching performed in the - // CommitKVStore and below is completely irrelevant to this layer. - CommitKVStoreCache struct { - types.CommitKVStore - cache *lru.ARCCache - } - - // CommitKVStoreCacheManager maintains a mapping from a StoreKey to a - // CommitKVStoreCache. Each CommitKVStore, per StoreKey, is meant to be used - // in an inter-block (persistent) manner and typically provided by a - // CommitMultiStore. - CommitKVStoreCacheManager struct { - cacheSize uint - caches map[string]types.CommitKVStore - } -) - -func NewCommitKVStoreCache(store types.CommitKVStore, size uint) *CommitKVStoreCache { - cache, err := lru.NewARC(int(size)) - if err != nil { - panic(fmt.Errorf("failed to create KVStore cache: %w", err)) - } - - return &CommitKVStoreCache{ - CommitKVStore: store, - cache: cache, - } -} - -func NewCommitKVStoreCacheManager(size uint) *CommitKVStoreCacheManager { - return &CommitKVStoreCacheManager{ - cacheSize: size, - caches: make(map[string]types.CommitKVStore), - } -} - -// GetStoreCache returns a Cache from the CommitStoreCacheManager for a given -// StoreKey. If no Cache exists for the StoreKey, then one is created and set. 
-// The returned Cache is meant to be used in a persistent manner. -func (cmgr *CommitKVStoreCacheManager) GetStoreCache(key types.StoreKey, store types.CommitKVStore) types.CommitKVStore { - if cmgr.caches[key.Name()] == nil { - cmgr.caches[key.Name()] = NewCommitKVStoreCache(store, cmgr.cacheSize) - } - - return cmgr.caches[key.Name()] -} - -// Unwrap returns the underlying CommitKVStore for a given StoreKey. -func (cmgr *CommitKVStoreCacheManager) Unwrap(key types.StoreKey) types.CommitKVStore { - if ckv, ok := cmgr.caches[key.Name()]; ok { - return ckv.(*CommitKVStoreCache).CommitKVStore - } - - return nil -} - -// Reset resets in the internal caches. -func (cmgr *CommitKVStoreCacheManager) Reset() { - // Clear the map. - // Please note that we are purposefully using the map clearing idiom. - // See https://github.com/cosmos/cosmos-sdk/issues/6681. - for key := range cmgr.caches { - delete(cmgr.caches, key) - } -} - -// CacheWrap implements the CacheWrapper interface -func (ckv *CommitKVStoreCache) CacheWrap() types.CacheWrap { - return cachekv.NewStore(ckv) -} - -// Get retrieves a value by key. It will first look in the write-through cache. -// If the value doesn't exist in the write-through cache, the query is delegated -// to the underlying CommitKVStore. -func (ckv *CommitKVStoreCache) Get(key []byte) []byte { - types.AssertValidKey(key) - - keyStr := string(key) - valueI, ok := ckv.cache.Get(keyStr) - if ok { - // cache hit - return valueI.([]byte) - } - - // cache miss; write to cache - value := ckv.CommitKVStore.Get(key) - ckv.cache.Add(keyStr, value) - - return value -} - -// Set inserts a key/value pair into both the write-through cache and the -// underlying CommitKVStore. -func (ckv *CommitKVStoreCache) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - - ckv.cache.Add(string(key), value) - ckv.CommitKVStore.Set(key, value) -} - -// Delete removes a key/value pair from both the write-through cache and the -// underlying CommitKVStore. 
-func (ckv *CommitKVStoreCache) Delete(key []byte) { - ckv.cache.Remove(string(key)) - ckv.CommitKVStore.Delete(key) -} diff --git a/store/cache/cache_test.go b/store/cache/cache_test.go deleted file mode 100644 index efbf22c8e1..0000000000 --- a/store/cache/cache_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cache" - "cosmossdk.io/store/cachekv" - iavlstore "cosmossdk.io/store/iavl" - "cosmossdk.io/store/types" - "cosmossdk.io/store/wrapper" -) - -func TestGetOrSetStoreCache(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - store2 := mngr.GetStoreCache(sKey, store) - - require.NotNil(t, store2) - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) -} - -func TestUnwrap(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - _ = mngr.GetStoreCache(sKey, store) - - require.Equal(t, store, mngr.Unwrap(sKey)) - require.Nil(t, mngr.Unwrap(types.NewKVStoreKey("test2"))) -} - -func TestStoreCache(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - kvStore := mngr.GetStoreCache(sKey, store) - - for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ { - key := []byte(fmt.Sprintf("key_%d", i)) - value := []byte(fmt.Sprintf("value_%d", i)) - - kvStore.Set(key, value) - - res := kvStore.Get(key) - require.Equal(t, res, value) - require.Equal(t, res, store.Get(key)) - - kvStore.Delete(key) - - require.Nil(t, kvStore.Get(key)) - require.Nil(t, store.Get(key)) - } -} - -func TestReset(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - store2 := mngr.GetStoreCache(sKey, store) - - require.NotNil(t, store2) - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) - - // reset and check if the cache is gone - mngr.Reset() - require.Nil(t, mngr.Unwrap(sKey)) - - // check if the cache is recreated - require.Equal(t, store2, mngr.GetStoreCache(sKey, store)) -} - -func TestCacheWrap(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) - - sKey := types.NewKVStoreKey("test") - tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger()) - store := iavlstore.UnsafeNewStore(tree) - - cacheWrapper := mngr.GetStoreCache(sKey, store).CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) -} diff --git a/store/cachekv/README.md b/store/cachekv/README.md deleted file mode 100644 index 66f0916dea..0000000000 --- a/store/cachekv/README.md +++ /dev/null @@ -1,140 
+0,0 @@ -# CacheKVStore specification - -A `CacheKVStore` is a cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposal of changes (e.g. after processing a failed transaction). - -The core goals the CacheKVStore seeks to achieve are: - -* Buffer all writes to the parent store, so they can be dropped if they need to be reverted -* Allow iteration over contiguous spans of keys -* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O) - * Note: We actually fail to achieve this for iteration right now - * Note: Need to consider this getting too large and dropping some cached reads -* Make subsequent reads account for prior buffered writes -* Write all buffered changes to the parent store - -We should revisit these goals over time (for instance, it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status. - -## Types and Structs - -```go -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - deleted map[string]struct{} - unsortedCache map[string]struct{} - sortedCache *dbm.MemDB // always ascending sorted - parent types.KVStore -} -``` - -The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. A mutex is used because IAVL trees (the `KVStore` used in the application) are not safe for concurrent use. - -### `cache` - -The main mapping of key-value pairs stored in the cache. This map contains both keys that are cached from read operations and ‘dirty’ keys that map to a value potentially different from what is in the underlying `KVStore`. - -Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`. - -```go -type cValue struct { - value []byte - dirty bool -} -``` - -### `deleted` - -Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set. - -### `unsortedCache` - -Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set. - -### `sortedCache` - -A database that is populated from the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order. - -## CRUD Operations and Writing - -The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it). - -`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted. - -### `Get` - -`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`. - -### `Has` - -`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read.
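For illustration only, here is a minimal, runnable sketch of the read-through behavior described in the `Get` and `Has` sections above. It is not the SDK implementation (the real `store/cachekv/store.go` appears later in this diff); the struct names mirror the README, while `KVStore` is pared down to the single method the sketch needs and `mapStore` is a hypothetical stand-in parent.

```go
package main

import "fmt"

// KVStore is a pared-down parent interface; the real types.KVStore is larger.
type KVStore interface {
	Get(key []byte) []byte
}

// cValue mirrors the README's cached-value wrapper.
type cValue struct {
	value []byte
	dirty bool
}

// Store is a reduced version of the README's Store struct
// (no mutex, deletion, or iteration bookkeeping).
type Store struct {
	cache  map[string]*cValue
	parent KVStore
}

// Get returns the cached value on a hit; on a miss it delegates to the
// parent and records the result as a clean (dirty=false) entry.
func (s *Store) Get(key []byte) []byte {
	if cv, ok := s.cache[string(key)]; ok {
		return cv.value
	}
	value := s.parent.Get(key)
	s.cache[string(key)] = &cValue{value: value, dirty: false}
	return value
}

// Has reports whether Get returns a non-nil value; like the real store,
// it may populate the cache as a side effect.
func (s *Store) Has(key []byte) bool {
	return s.Get(key) != nil
}

// mapStore is a trivial stand-in parent used only to run the sketch.
type mapStore map[string][]byte

func (m mapStore) Get(key []byte) []byte { return m[string(key)] }

func main() {
	s := &Store{cache: map[string]*cValue{}, parent: mapStore{"k": []byte("v")}}
	fmt.Println(string(s.Get([]byte("k"))), s.Has([]byte("k"))) // v true
	fmt.Println(s.Has([]byte("missing")))                       // false
}
```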
- -### `Set` - -New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`. - -Calls `setCacheValue()` with `deleted=false` and `dirty=true`. - -### `Delete` - -A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`. - -Calls `setCacheValue()` with `deleted=true` and `dirty=true`. - -### `Write` - -Key-value pairs in the cache are written to `parent` in ascending order of their keys. - -A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`. - -If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache. - -## Iteration - -Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache. - -In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching). - -[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache. - -### Implementation Overview - -Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator. - -Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested. - -Generating the cache iterator can be decomposed into four parts: - -1. Finding all keys that exist in the range we are iterating over -2. Sorting this list of keys -3. Inserting these keys into `sortedCache` and removing them from `unsortedCache` -4. Returning an iterator over `sortedCache` with the desired range - -Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows. - -If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys. - -```go -n := len(store.unsortedCache) -unsorted := make([]*kv.Pair, 0) - -if n < minSortSize { - for key := range store.unsortedCache { - if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { - cacheValue := store.cache[key] - unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - } - store.clearUnsortedCacheSubset(unsorted, stateUnsorted) - return -} -``` - -Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`. - -At this point, part 3. 
is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`). - -In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`sortedState`) is used to skip the sorting step in the function. - -Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`. - -As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls. \ No newline at end of file diff --git a/store/cachekv/bench_helper_test.go b/store/cachekv/bench_helper_test.go deleted file mode 100644 index be7fec4b3a..0000000000 --- a/store/cachekv/bench_helper_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv_test - -import "crypto/rand" - -func randSlice(sliceSize int) []byte { - bz := make([]byte, sliceSize) - _, _ = rand.Read(bz) - return bz -} - -func incrementByteSlice(bz []byte) { - for index := len(bz) - 1; index >= 0; index-- { - if bz[index] < 255 { - bz[index]++ - break - } else { - bz[index] = 0 - } - } -} - -// Generate many keys starting at startKey, and are in sequential order -func generateSequentialKeys(startKey []byte, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - cur := make([]byte, len(startKey)) - copy(cur, startKey) - for i := 0; i < numKeys; i++ { - newKey := make([]byte, len(startKey)) - copy(newKey, cur) - toReturn = append(toReturn, newKey) - incrementByteSlice(cur) - } - return toReturn -} - -// Generate many random, unsorted keys -func generateRandomKeys(keySize, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - for i := 0; i < numKeys; i++ { - newKey := randSlice(keySize) - toReturn = append(toReturn, newKey) - } - return toReturn -} diff --git a/store/cachekv/benchmark_test.go b/store/cachekv/benchmark_test.go deleted file mode 100644 index 158549b4bd..0000000000 --- a/store/cachekv/benchmark_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package cachekv_test - -import ( - fmt "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/types" -) - -func DoBenchmarkDeepCacheStack(b *testing.B, depth int) { - b.Helper() - db := dbm.NewMemDB() - initialStore := cachekv.NewStore(dbadapter.Store{DB: db}) - - nItems := 20 - for i := 0; i < nItems; i++ { - initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0}) - } - - var stack CacheStack - stack.Reset(initialStore) - - 
for i := 0; i < depth; i++ { - stack.Snapshot() - - store := stack.CurrentStore() - store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)}) - } - - store := stack.CurrentStore() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - it := store.Iterator(nil, nil) - items := make([][]byte, 0, nItems) - for ; it.Valid(); it.Next() { - items = append(items, it.Key()) - it.Value() - } - it.Close() - require.Equal(b, nItems, len(items)) - } -} - -func BenchmarkDeepCacheStack1(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 1) -} - -func BenchmarkDeepCacheStack3(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 3) -} - -func BenchmarkDeepCacheStack10(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 10) -} - -func BenchmarkDeepCacheStack13(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 13) -} - -// CacheStack manages a stack of nested cache store to -// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods. -type CacheStack struct { - initialStore types.CacheKVStore - // Context of the initial state before transaction execution. - // It's the context used by `StateDB.CommitedState`. - cacheStores []types.CacheKVStore -} - -// CurrentContext returns the top context of cached stack, -// if the stack is empty, returns the initial context. -func (cs *CacheStack) CurrentStore() types.CacheKVStore { - l := len(cs.cacheStores) - if l == 0 { - return cs.initialStore - } - return cs.cacheStores[l-1] -} - -// Reset sets the initial context and clear the cache context stack. -func (cs *CacheStack) Reset(initialStore types.CacheKVStore) { - cs.initialStore = initialStore - cs.cacheStores = nil -} - -// IsEmpty returns true if the cache context stack is empty. -func (cs *CacheStack) IsEmpty() bool { - return len(cs.cacheStores) == 0 -} - -// Commit commits all the cached contexts from top to bottom in order and clears the stack by setting an empty slice of cache contexts. -func (cs *CacheStack) Commit() { - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i >= 0; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = nil -} - -// CommitToRevision commit the cache after the target revision, -// to improve efficiency of db operations. -func (cs *CacheStack) CommitToRevision(target int) error { - if target < 0 || target >= len(cs.cacheStores) { - return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)) - } - - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i > target; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = cs.cacheStores[0 : target+1] - - return nil -} - -// Snapshot pushes a new cached context to the stack, -// and returns the index of it. -func (cs *CacheStack) Snapshot() int { - cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore())) - return len(cs.cacheStores) - 1 -} - -// RevertToSnapshot pops all the cached contexts after the target index (inclusive). -// the target should be snapshot index returned by `Snapshot`. -// This function panics if the index is out of bounds. 
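// A minimal usage sketch of this API (an illustrative addition, not part of
// the original file; k and v stand for arbitrary []byte key/value):
//
//	var stack CacheStack
//	stack.Reset(initialStore)      // initialStore is a types.CacheKVStore
//	id := stack.Snapshot()         // push a fresh overlay, remember its index
//	stack.CurrentStore().Set(k, v) // buffered in the top overlay only
//	stack.RevertToSnapshot(id)     // drop overlay id and everything above it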
-func (cs *CacheStack) RevertToSnapshot(target int) { - if target < 0 || target >= len(cs.cacheStores) { - panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))) - } - cs.cacheStores = cs.cacheStores[:target] -} diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go deleted file mode 100644 index 209f7e58c4..0000000000 --- a/store/cachekv/internal/btree.go +++ /dev/null @@ -1,91 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// BTree implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type BTree struct { - tree *btree.BTreeG[item] -} - -// NewBTree creates a wrapper around `btree.BTreeG`. -func NewBTree() BTree { - return BTree{ - tree: btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: false, - }), - } -} - -func (bt BTree) Set(key, value []byte) { - bt.tree.Set(newItem(key, value)) -} - -func (bt BTree) Get(key []byte) []byte { - i, found := bt.tree.Get(newItem(key, nil)) - if !found { - return nil - } - return i.value -} - -func (bt BTree) Delete(key []byte) { - bt.tree.Delete(newItem(key, nil)) -} - -func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, true), nil -} - -func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, false), nil -} - -// Copy the tree. This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (bt BTree) Copy() BTree { - return BTree{ - tree: bt.tree.Copy(), - } -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key, value []byte) item { - return item{key: key, value: value} -} diff --git a/store/cachekv/internal/btree_test.go b/store/cachekv/internal/btree_test.go deleted file mode 100644 index 06437997f6..0000000000 --- a/store/cachekv/internal/btree_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package internal - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestGetSetDelete(t *testing.T) { - db := NewBTree() - - // A nonexistent key should return nil. - value := db.Get([]byte("a")) - require.Nil(t, value) - - // Set and get a value. - db.Set([]byte("a"), []byte{0x01}) - db.Set([]byte("b"), []byte{0x02}) - value = db.Get([]byte("a")) - require.Equal(t, []byte{0x01}, value) - - value = db.Get([]byte("b")) - require.Equal(t, []byte{0x02}, value) - - // Deleting a non-existent value is fine. - db.Delete([]byte("x")) - - // Delete a value. 
- db.Delete([]byte("a")) - - value = db.Get([]byte("a")) - require.Nil(t, value) - - db.Delete([]byte("b")) - - value = db.Get([]byte("b")) - require.Nil(t, value) -} - -func TestDBIterator(t *testing.T) { - db := NewBTree() - - for i := 0; i < 10; i++ { - if i != 6 { // but skip 6. - db.Set(int642Bytes(int64(i)), []byte{}) - } - } - - // Blank iterator keys should error - _, err := db.ReverseIterator([]byte{}, nil) - require.Equal(t, errKeyEmpty, err) - _, err = db.ReverseIterator(nil, []byte{}) - require.Equal(t, errKeyEmpty, err) - - itr, err := db.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") - - ritr, err := db.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") - - itr, err = db.Iterator(nil, int642Bytes(0)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator to 0") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") - - itr, err = db.Iterator(int642Bytes(0), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") - - itr, err = db.Iterator(int642Bytes(1), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") - - ritr, err = db.ReverseIterator(nil, int642Bytes(10)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") - - itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 6 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 7 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, 
"reverse iterator from 6 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 7 (ex) to 6") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") - - ritr, err = db.ReverseIterator(int642Bytes(6), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") - - ritr, err = db.ReverseIterator(int642Bytes(5), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") - - ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") - - ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{3, 2}, "reverse iterator from 4 (ex) to 2") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 2 (ex) to 4") - - // Ensure that the iterators don't panic with an empty database. - db2 := NewBTree() - - itr, err = db2.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, nil, "forward iterator with empty db") - - ritr, err = db2.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, nil, "reverse iterator with empty db") -} - -func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) { - t.Helper() - i := 0 - for itr.Valid() { - key := itr.Key() - require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i) - itr.Next() - i++ - } - require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter") - require.NoError(t, itr.Close()) -} - -func int642Bytes(i int64) []byte { - return types.Uint64ToBigEndian(uint64(i)) -} - -func bytes2Int64(buf []byte) int64 { - return int64(types.BigEndianToUint64(buf)) -} diff --git a/store/cachekv/internal/memiterator.go b/store/cachekv/internal/memiterator.go deleted file mode 100644 index 9dbba75870..0000000000 --- a/store/cachekv/internal/memiterator.go +++ /dev/null @@ -1,120 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -var _ types.Iterator = (*memIterator)(nil) - -// memIterator iterates over iterKVCache items. -// if value is nil, means it was deleted. -// Implements Iterator. 
-type memIterator struct { - iter btree.IterG[item] - - start []byte - end []byte - ascending bool - valid bool -} - -func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { - iter := items.tree.Iter() - var valid bool - if ascending { - if start != nil { - valid = iter.Seek(newItem(start, nil)) - } else { - valid = iter.First() - } - } else { - if end != nil { - valid = iter.Seek(newItem(end, nil)) - if !valid { - valid = iter.Last() - } else { - // end is exclusive - valid = iter.Prev() - } - } else { - valid = iter.Last() - } - } - - mi := &memIterator{ - iter: iter, - start: start, - end: end, - ascending: ascending, - valid: valid, - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } - - return mi -} - -func (mi *memIterator) Domain() (start, end []byte) { - return mi.start, mi.end -} - -func (mi *memIterator) Close() error { - mi.iter.Release() - return nil -} - -func (mi *memIterator) Error() error { - if !mi.Valid() { - return errors.New("invalid memIterator") - } - return nil -} - -func (mi *memIterator) Valid() bool { - return mi.valid -} - -func (mi *memIterator) Next() { - mi.assertValid() - - if mi.ascending { - mi.valid = mi.iter.Next() - } else { - mi.valid = mi.iter.Prev() - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } -} - -func (mi *memIterator) keyInRange(key []byte) bool { - if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { - return false - } - if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { - return false - } - return true -} - -func (mi *memIterator) Key() []byte { - return mi.iter.Item().key -} - -func (mi *memIterator) Value() []byte { - return mi.iter.Item().value -} - -func (mi *memIterator) assertValid() { - if err := mi.Error(); err != nil { - panic(err) - } -} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go deleted file mode 100644 index 58e9497b30..0000000000 --- a/store/cachekv/internal/mergeiterator.go +++ /dev/null @@ -1,235 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "cosmossdk.io/store/types" -) - -// cacheMergeIterator merges a parent Iterator and a cache Iterator. -// The cache iterator may return nil keys to signal that an item -// had been deleted (but not deleted in the parent). -// If the cache iterator has the same key as the parent, the -// cache shadows (overrides) the parent. -// -// TODO: Optimize by memoizing. -type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator - ascending bool - - valid bool -} - -var _ types.Iterator = (*cacheMergeIterator)(nil) - -func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator { - iter := &cacheMergeIterator{ - parent: parent, - cache: cache, - ascending: ascending, - } - - iter.valid = iter.skipUntilExistsOrInvalid() - return iter -} - -// Domain implements Iterator. -// Returns parent domain because cache and parent domains are the same. -func (iter *cacheMergeIterator) Domain() (start, end []byte) { - return iter.parent.Domain() -} - -// Valid implements Iterator. -func (iter *cacheMergeIterator) Valid() bool { - return iter.valid -} - -// Next implements Iterator -func (iter *cacheMergeIterator) Next() { - iter.assertValid() - - switch { - case !iter.parent.Valid(): - // If parent is invalid, get the next cache item. - iter.cache.Next() - case !iter.cache.Valid(): - // If cache is invalid, get the next parent item. - iter.parent.Next() - default: - // Both are valid. Compare keys. 
- keyP, keyC := iter.parent.Key(), iter.cache.Key() - switch iter.compare(keyP, keyC) { - case -1: // parent < cache - iter.parent.Next() - case 0: // parent == cache - iter.parent.Next() - iter.cache.Next() - case 1: // parent > cache - iter.cache.Next() - } - } - iter.valid = iter.skipUntilExistsOrInvalid() -} - -// Key implements Iterator -func (iter *cacheMergeIterator) Key() []byte { - iter.assertValid() - - // If parent is invalid, get the cache key. - if !iter.parent.Valid() { - return iter.cache.Key() - } - - // If cache is invalid, get the parent key. - if !iter.cache.Valid() { - return iter.parent.Key() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return keyP - case 0: // parent == cache - return keyP - case 1: // parent > cache - return keyC - default: - panic("invalid compare result") - } -} - -// Value implements Iterator -func (iter *cacheMergeIterator) Value() []byte { - iter.assertValid() - - // If parent is invalid, get the cache value. - if !iter.parent.Valid() { - return iter.cache.Value() - } - - // If cache is invalid, get the parent value. - if !iter.cache.Valid() { - return iter.parent.Value() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return iter.parent.Value() - case 0: // parent == cache - return iter.cache.Value() - case 1: // parent > cache - return iter.cache.Value() - default: - panic("invalid comparison result") - } -} - -// Close implements Iterator -func (iter *cacheMergeIterator) Close() error { - err1 := iter.cache.Close() - if err := iter.parent.Close(); err != nil { - return err - } - - return err1 -} - -// Error returns an error if the cacheMergeIterator is invalid defined by the -// Valid method. -func (iter *cacheMergeIterator) Error() error { - if !iter.Valid() { - return errors.New("invalid cacheMergeIterator") - } - - return nil -} - -// If not valid, panics. -// NOTE: May have side-effect of iterating over cache. -func (iter *cacheMergeIterator) assertValid() { - if err := iter.Error(); err != nil { - panic(err) - } -} - -// Like bytes.Compare but opposite if not ascending. -func (iter *cacheMergeIterator) compare(a, b []byte) int { - if iter.ascending { - return bytes.Compare(a, b) - } - - return bytes.Compare(a, b) * -1 -} - -// Skip all delete-items from the cache w/ `key < until`. After this function, -// current cache item is a non-delete-item, or `until <= key`. -// If the current cache item is not a delete item, does nothing. -// If `until` is nil, there is no limit, and cache may end up invalid. -// CONTRACT: cache is valid. -func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { - for iter.cache.Valid() && - iter.cache.Value() == nil && - (until == nil || iter.compare(iter.cache.Key(), until) < 0) { - iter.cache.Next() - } -} - -// Fast forwards cache (or parent+cache in case of deleted items) until current -// item exists, or until iterator becomes invalid. -// Returns whether the iterator is valid. -func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { - for { - // If parent is invalid, fast-forward cache. - if !iter.parent.Valid() { - iter.skipCacheDeletes(nil) - return iter.cache.Valid() - } - // Parent is valid. - - if !iter.cache.Valid() { - return true - } - // Parent is valid, cache is valid. - - // Compare parent and cache. 
- keyP := iter.parent.Key() - keyC := iter.cache.Key() - - switch iter.compare(keyP, keyC) { - case -1: // parent < cache. - return true - - case 0: // parent == cache. - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.parent.Next() - iter.cache.Next() - - continue - } - // Cache is not a delete. - - return true // cache exists. - case 1: // cache < parent - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.skipCacheDeletes(keyP) - continue - } - // Cache is not a delete. - - return true // cache exists. - } - } -} diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go deleted file mode 100644 index ecdc86a8e4..0000000000 --- a/store/cachekv/search_benchmark_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv - -import ( - "strconv" - "testing" - - "cosmossdk.io/store/cachekv/internal" -) - -func BenchmarkLargeUnsortedMisses(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - store := generateStore() - b.StartTimer() - - for k := 0; k < 10000; k++ { - // cache has A + Z values - // these are within range, but match nothing - store.dirtyItems([]byte("B1"), []byte("B2")) - } - } -} - -func generateStore() *Store { - cache := map[string]*cValue{} - unsorted := map[string]struct{}{} - for i := 0; i < 5000; i++ { - key := "A" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - for i := 0; i < 5000; i++ { - key := "Z" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - return &Store{ - cache: cache, - unsortedCache: unsorted, - sortedCache: internal.NewBTree(), - } -} diff --git a/store/cachekv/search_test.go b/store/cachekv/search_test.go deleted file mode 100644 index 41321c076e..0000000000 --- a/store/cachekv/search_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package cachekv - -import "testing" - -func TestFindStartIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 8, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent but within >=start", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: -1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findStartIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} - -func TestFindEndIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: 
[]string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 7, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: -1, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: 4, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findEndIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} diff --git a/store/cachekv/store.go b/store/cachekv/store.go deleted file mode 100644 index 879ce2a416..0000000000 --- a/store/cachekv/store.go +++ /dev/null @@ -1,408 +0,0 @@ -package cachekv - -import ( - "bytes" - "io" - "sort" - "sync" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/math" - "cosmossdk.io/store/cachekv/internal" - "cosmossdk.io/store/internal/conv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// cValue represents a cached value. -// If dirty is true, it indicates the cached value is different from the underlying value. -type cValue struct { - value []byte - dirty bool -} - -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - unsortedCache map[string]struct{} - sortedCache internal.BTree // always ascending sorted - parent types.KVStore -} - -var _ types.CacheKVStore = (*Store)(nil) - -// NewStore creates a new Store object -func NewStore(parent types.KVStore) *Store { - return &Store{ - cache: make(map[string]*cValue), - unsortedCache: make(map[string]struct{}), - sortedCache: internal.NewBTree(), - parent: parent, - } -} - -// GetStoreType implements Store. -func (store *Store) GetStoreType() types.StoreType { - return store.parent.GetStoreType() -} - -// Get implements types.KVStore. -func (store *Store) Get(key []byte) (value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() - - types.AssertValidKey(key) - - cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)] - if !ok { - value = store.parent.Get(key) - store.setCacheValue(key, value, false) - } else { - value = cacheValue.value - } - - return value -} - -// Set implements types.KVStore. -func (store *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - - store.mtx.Lock() - defer store.mtx.Unlock() - store.setCacheValue(key, value, true) -} - -// Has implements types.KVStore. -func (store *Store) Has(key []byte) bool { - value := store.Get(key) - return value != nil -} - -// Delete implements types.KVStore. 
-func (store *Store) Delete(key []byte) { - types.AssertValidKey(key) - - store.mtx.Lock() - defer store.mtx.Unlock() - - store.setCacheValue(key, nil, true) -} - -func (store *Store) resetCaches() { - if len(store.cache) > 100_000 { - // Cache is too large. We likely did something linear time - // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. - // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. - // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. - store.cache = make(map[string]*cValue) - store.unsortedCache = make(map[string]struct{}) - } else { - // Clear the cache using the map clearing idiom - // and not allocating fresh objects. - // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ - for key := range store.cache { - delete(store.cache, key) - } - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } - store.sortedCache = internal.NewBTree() -} - -// Implements Cachetypes.KVStore. -func (store *Store) Write() { - store.mtx.Lock() - defer store.mtx.Unlock() - - if len(store.cache) == 0 && len(store.unsortedCache) == 0 { - store.sortedCache = internal.NewBTree() - return - } - - type cEntry struct { - key string - val *cValue - } - - // We need a copy of all of the keys. - // Not the best. To reduce RAM pressure, we copy the values as well - // and clear out the old caches right after the copy. - sortedCache := make([]cEntry, 0, len(store.cache)) - - for key, dbValue := range store.cache { - if dbValue.dirty { - sortedCache = append(sortedCache, cEntry{key, dbValue}) - } - } - store.resetCaches() - sort.Slice(sortedCache, func(i, j int) bool { - return sortedCache[i].key < sortedCache[j].key - }) - - // TODO: Consider allowing usage of Batch, which would allow the write to - // at least happen atomically. - for _, obj := range sortedCache { - // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot - // be sure if the underlying store might do a save with the byteslice or - // not. Once we get confirmation that .Delete is guaranteed not to - // save the byteslice, then we can assume only a read-only copy is sufficient. - if obj.val.value != nil { - // It already exists in the parent, hence update it. - store.parent.Set([]byte(obj.key), obj.val.value) - } else { - store.parent.Delete([]byte(obj.key)) - } - } -} - -// CacheWrap implements CacheWrapper. -func (store *Store) CacheWrap() types.CacheWrap { - return NewStore(store) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return NewStore(tracekv.NewStore(store, w, tc)) -} - -//---------------------------------------- -// Iteration - -// Iterator implements types.KVStore. -func (store *Store) Iterator(start, end []byte) types.Iterator { - return store.iterator(start, end, true) -} - -// ReverseIterator implements types.KVStore. 
-func (store *Store) ReverseIterator(start, end []byte) types.Iterator { - return store.iterator(start, end, false) -} - -func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { - store.mtx.Lock() - defer store.mtx.Unlock() - - store.dirtyItems(start, end) - isoSortedCache := store.sortedCache.Copy() - - var ( - err error - parent, cache types.Iterator - ) - - if ascending { - parent = store.parent.Iterator(start, end) - cache, err = isoSortedCache.Iterator(start, end) - } else { - parent = store.parent.ReverseIterator(start, end) - cache, err = isoSortedCache.ReverseIterator(start, end) - } - if err != nil { - panic(err) - } - - return internal.NewCacheMergeIterator(parent, cache, ascending) -} - -func findStartIndex(strL []string, startQ string) int { - // Modified binary search to find the very first element >= startQ. - if len(strL) == 0 { - return -1 - } - - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == startQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStr, so that i+1 will be the first - // element >= midStr. - for i := mid - 1; i >= 0; i-- { - if strL[i] != midStr { - return i + 1 - } - } - return 0 - } - if midStr < startQ { - left = mid + 1 - } else { // midStr > startQ - right = mid - 1 - } - } - if left >= 0 && left < len(strL) && strL[left] >= startQ { - return left - } - return -1 -} - -func findEndIndex(strL []string, endQ string) int { - if len(strL) == 0 { - return -1 - } - - // Modified binary search to find the very first element < endQ. - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == endQ { - // Handle condition where there might be multiple values equal to endQ. - // We are looking for the very first value < midStr, so that i+1 will be the first - // element >= midStr. - for i := mid - 1; i >= 0; i-- { - if strL[i] < midStr { - return i + 1 - } - } - return 0 - } - if midStr < endQ { - left = mid + 1 - } else { // midStr > endQ - right = mid - 1 - } - } - - // Binary search failed, now let's find a value less than endQ. - for i := right; i >= 0; i-- { - if strL[i] < endQ { - return i - } - } - - return -1 -} - -type sortState int - -const ( - stateUnsorted sortState = iota - stateAlreadySorted -) - -const minSortSize = 1024 - -// Constructs a slice of dirty items, to use w/ memIterator. -func (store *Store) dirtyItems(start, end []byte) { - startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end) - if end != nil && startStr > endStr { - // Nothing to do here. - return - } - - n := len(store.unsortedCache) - unsorted := make([]*kv.Pair, 0) - // If the unsortedCache is too big, it costs too much to determine - // what's in the subset we are concerned about. - // If you are interleaving iterator calls with writes, this can easily become an - // O(N^2) overhead. - // Even without that, too many range checks eventually become more expensive - // than just not having the cache.
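// In short: below minSortSize dirty keys, scan the whole unsortedCache
// linearly; at or above it, sort all dirty keys once and binary-search the
// [start, end) window via findStartIndex and findEndIndex above.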
-type sortState int
-
-const (
-	stateUnsorted sortState = iota
-	stateAlreadySorted
-)
-
-const minSortSize = 1024
-
-// Constructs a slice of dirty items, to use w/ memIterator.
-func (store *Store) dirtyItems(start, end []byte) {
-	startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
-	if end != nil && startStr > endStr {
-		// Nothing to do here.
-		return
-	}
-
-	n := len(store.unsortedCache)
-	unsorted := make([]*kv.Pair, 0)
-	// If the unsortedCache is too big, it costs too much to determine
-	// what's in the subset we are concerned about.
-	// If you are interleaving iterator calls with writes, this can easily become an
-	// O(N^2) overhead.
-	// Even without that, too many range checks eventually become more expensive
-	// than just not having the cache.
-	if n < minSortSize {
-		for key := range store.unsortedCache {
-			// dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start
-			if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
-				cacheValue := store.cache[key]
-				unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
-			}
-		}
-		store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
-		return
-	}
-
-	// Otherwise it is large so perform a modified binary search to find
-	// the target ranges for the keys that we should be looking for.
-	strL := make([]string, 0, n)
-	for key := range store.unsortedCache {
-		strL = append(strL, key)
-	}
-	sort.Strings(strL)
-
-	// Now find the values within the domain
-	// [start, end)
-	startIndex := findStartIndex(strL, startStr)
-	if startIndex < 0 {
-		startIndex = 0
-	}
-
-	var endIndex int
-	if end == nil {
-		endIndex = len(strL) - 1
-	} else {
-		endIndex = findEndIndex(strL, endStr)
-	}
-	if endIndex < 0 {
-		endIndex = len(strL) - 1
-	}
-
-	// Since we spent cycles to sort the values, we should process and remove a reasonable amount:
-	// ensure start to end is at least minSortSize in size;
-	// if below minSortSize, expand it to cover additional values.
-	// This amortizes the cost of processing elements across multiple calls.
-	if endIndex-startIndex < minSortSize {
-		endIndex = math.Min(startIndex+minSortSize, len(strL)-1)
-		if endIndex-startIndex < minSortSize {
-			startIndex = math.Max(endIndex-minSortSize, 0)
-		}
-	}
-
-	kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex)
-	for i := startIndex; i <= endIndex; i++ {
-		key := strL[i]
-		cacheValue := store.cache[key]
-		kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
-	}
-
-	// kvL was already sorted so pass it in as is.
-	store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
-}
-
-func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) {
-	n := len(store.unsortedCache)
-	if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map.
-		for key := range store.unsortedCache {
-			delete(store.unsortedCache, key)
-		}
-	} else { // Otherwise, normally delete the unsorted keys from the map.
-		for _, kv := range unsorted {
-			delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key))
-		}
-	}
-
-	if sortState == stateUnsorted {
-		sort.Slice(unsorted, func(i, j int) bool {
-			return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0
-		})
-	}
-
-	for _, item := range unsorted {
-		// sortedCache is able to store `nil` value to represent deleted items.
-		store.sortedCache.Set(item.Key, item.Value)
-	}
-}
-
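`clearUnsortedCacheSubset` copies dirty pairs into `sortedCache`, where a nil value is a tombstone: it marks a key as deleted in the cache layer and must shadow whatever the parent holds. A toy illustration of that shadowing rule, using a plain map in place of the B-tree, purely for exposition:

```go
package main

import "fmt"

// mergedGet mimics the cache-over-parent lookup semantics: a cached nil is a
// tombstone that hides the parent's value, a cached non-nil value wins, and a
// cache miss falls through to the parent.
func mergedGet(cache, parent map[string][]byte, key string) []byte {
	if v, ok := cache[key]; ok {
		return v // may be nil: a deletion shadowing the parent
	}
	return parent[key]
}

func main() {
	parent := map[string][]byte{"k1": []byte("old"), "k2": []byte("keep")}
	cache := map[string][]byte{
		"k1": nil,             // deleted in the cache layer
		"k3": []byte("fresh"), // written only in the cache layer
	}
	fmt.Println(mergedGet(cache, parent, "k1"))         // [] -> tombstoned
	fmt.Println(string(mergedGet(cache, parent, "k2"))) // keep
	fmt.Println(string(mergedGet(cache, parent, "k3"))) // fresh
}
```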
-//----------------------------------------
-// etc
-
-// Only entrypoint to mutate store.cache.
-// A `nil` value means a deletion.
-func (store *Store) setCacheValue(key, value []byte, dirty bool) {
-	keyStr := conv.UnsafeBytesToStr(key)
-	store.cache[keyStr] = &cValue{
-		value: value,
-		dirty: dirty,
-	}
-	if dirty {
-		store.unsortedCache[keyStr] = struct{}{}
-	}
-}
diff --git a/store/cachekv/store_bench_test.go b/store/cachekv/store_bench_test.go
deleted file mode 100644
index 8f15855e09..0000000000
--- a/store/cachekv/store_bench_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package cachekv_test
-
-import (
-	"testing"
-
-	dbm "github.com/cosmos/cosmos-db"
-
-	"cosmossdk.io/store/cachekv"
-	"cosmossdk.io/store/dbadapter"
-)
-
-var sink interface{}
-
-const defaultValueSizeBz = 1 << 12
-
-// This benchmark measures the time of iterator.Next() when the parent store is blank
-func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) {
-	b.Helper()
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	kvstore := cachekv.NewStore(mem)
-	// Use a singleton for value, to not waste time computing it
-	value := randSlice(defaultValueSizeBz)
-	// Use simple values for keys, pick a random start,
-	// and take next b.N keys sequentially after.
-	startKey := randSlice(32)
-
-	// Add 1 to avoid issues when b.N = 1
-	keys := generateSequentialKeys(startKey, b.N+1)
-	for _, k := range keys {
-		kvstore.Set(k, value)
-	}
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	iter := kvstore.Iterator(keys[0], keys[b.N])
-	defer iter.Close()
-
-	for ; iter.Valid(); iter.Next() {
-		_ = iter.Key()
-		// deadcode elimination stub
-		sink = iter
-	}
-}
-
-// Benchmark setting new keys to a store, where the new keys are in sequence.
-func benchmarkBlankParentAppend(b *testing.B, keysize int) {
-	b.Helper()
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	kvstore := cachekv.NewStore(mem)
-
-	// Use a singleton for value, to not waste time computing it
-	value := randSlice(32)
-	// Use simple values for keys, pick a random start,
-	// and take next b.N keys sequentially after.
-	startKey := randSlice(32)
-
-	keys := generateSequentialKeys(startKey, b.N)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for _, k := range keys {
-		kvstore.Set(k, value)
-	}
-}
-
-// Benchmark setting new keys to a store, where the new keys are random.
-// The speed of this function does not depend on the values in the parent store.
-func benchmarkRandomSet(b *testing.B, keysize int) {
-	b.Helper()
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	kvstore := cachekv.NewStore(mem)
-
-	// Use a singleton for value, to not waste time computing it
-	value := randSlice(defaultValueSizeBz)
-	// Add 1 to avoid issues when b.N = 1
-	keys := generateRandomKeys(keysize, b.N+1)
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for _, k := range keys {
-		kvstore.Set(k, value)
-	}
-
-	iter := kvstore.Iterator(keys[0], keys[b.N])
-	defer iter.Close()
-
-	for ; iter.Valid(); iter.Next() {
-		_ = iter.Key()
-		// deadcode elimination stub
-		sink = iter
-	}
-}
-
-// Benchmark creating an iterator on a parent with D entries,
-// that are all deleted in the cacheKV store.
-// We essentially are benchmarking the cacheKV iterator creation & iteration times
-// with the number of entries deleted in the parent.
-func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) {
-	b.Helper()
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-
-	// Use a singleton for value, to not waste time computing it
-	value := randSlice(32)
-	// Use simple values for keys, pick a random start,
-	// and take next D keys sequentially after.
- startKey := randSlice(32) - // Add 1 to avoid issues when numDeletes = 1 - keys := generateSequentialKeys(startKey, numDeletes+1) - // setup parent db with D keys. - for _, k := range keys { - mem.Set(k, value) - } - kvstore := cachekv.NewStore(mem) - // Delete all keys from the cache KV store. - // The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design. - // Essentially the iterator will never be valid, in that it should never run. - // However, this is incompatible with the for loop structure the SDK uses, hence - // causes a panic. Thus we do keys[1:]. - for _, k := range keys[1:] { - kvstore.Delete(k) - } - - b.ReportAllocs() - b.ResetTimer() - - iter := kvstore.Iterator(keys[0], keys[numDeletes]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) { - benchmarkBlankParentIteratorNext(b, 32) -} - -func BenchmarkBlankParentAppendKeySize32(b *testing.B) { - benchmarkBlankParentAppend(b, 32) -} - -func BenchmarkSetKeySize32(b *testing.B) { - benchmarkRandomSet(b, 32) -} - -func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) { - benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000) -} diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go deleted file mode 100644 index 220d25dd83..0000000000 --- a/store/cachekv/store_test.go +++ /dev/null @@ -1,694 +0,0 @@ -package cachekv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/math/unsafe" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/types" -) - -func newCacheKVStore() types.CacheKVStore { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - return cachekv.NewStore(mem) -} - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -func TestCacheKVStore(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - - // put something in mem and in cache - mem.Set(keyFmt(1), valFmt(1)) - st.Set(keyFmt(1), valFmt(1)) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - - // update it in cache, shouldn't change mem - st.Set(keyFmt(1), valFmt(2)) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) - - // write it. should change mem - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // more writes and checks - st.Write() - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one, check it - st = cachekv.NewStore(mem) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one and delete - should not be removed from mem - st = cachekv.NewStore(mem) - st.Delete(keyFmt(1)) - require.Empty(t, st.Get(keyFmt(1))) - require.Equal(t, mem.Get(keyFmt(1)), valFmt(2)) - - // Write. 
should now be removed from both
-	st.Write()
-	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
-	require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty")
-}
-
-func TestCacheKVStoreNoNilSet(t *testing.T) {
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	st := cachekv.NewStore(mem)
-	require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic")
-	require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
-	require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
-}
-
-func TestCacheKVStoreNested(t *testing.T) {
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	st := cachekv.NewStore(mem)
-
-	// set. check it's there on st and not on mem.
-	st.Set(keyFmt(1), valFmt(1))
-	require.Empty(t, mem.Get(keyFmt(1)))
-	require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
-
-	// make a new one from st and check
-	st2 := cachekv.NewStore(st)
-	require.Equal(t, valFmt(1), st2.Get(keyFmt(1)))
-
-	// update the value on st2, check it only affects st2
-	st2.Set(keyFmt(1), valFmt(3))
-	require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
-	require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
-	require.Equal(t, valFmt(3), st2.Get(keyFmt(1)))
-
-	// st2 writes to its parent, st. doesn't affect mem
-	st2.Write()
-	require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
-	require.Equal(t, valFmt(3), st.Get(keyFmt(1)))
-
-	// updates mem
-	st.Write()
-	require.Equal(t, valFmt(3), mem.Get(keyFmt(1)))
-}
-
-func TestCacheKVIteratorBounds(t *testing.T) {
-	st := newCacheKVStore()
-
-	// set some items
-	nItems := 5
-	for i := 0; i < nItems; i++ {
-		st.Set(keyFmt(i), valFmt(i))
-	}
-
-	// iterate over all of them
-	itr := st.Iterator(nil, nil)
-	i := 0
-	for ; itr.Valid(); itr.Next() {
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(i), k)
-		require.Equal(t, valFmt(i), v)
-		i++
-	}
-	require.Equal(t, nItems, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over none
-	itr = st.Iterator(bz("money"), nil)
-	i = 0
-	for ; itr.Valid(); itr.Next() {
-		i++
-	}
-	require.Equal(t, 0, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over lower
-	itr = st.Iterator(keyFmt(0), keyFmt(3))
-	i = 0
-	for ; itr.Valid(); itr.Next() {
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(i), k)
-		require.Equal(t, valFmt(i), v)
-		i++
-	}
-	require.Equal(t, 3, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over upper
-	itr = st.Iterator(keyFmt(2), keyFmt(4))
-	i = 2
-	for ; itr.Valid(); itr.Next() {
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(i), k)
-		require.Equal(t, valFmt(i), v)
-		i++
-	}
-	require.Equal(t, 4, i)
-	require.NoError(t, itr.Close())
-}
-
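TestCacheKVStoreNested above pins down the branching contract: each Write() flushes exactly one level, from a branch into its immediate parent, and nothing further down. A compact sketch of that contract, assuming the deleted cachekv and dbadapter packages are still importable as in the test's own imports:

```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/cachekv"
	"cosmossdk.io/store/dbadapter"
)

func main() {
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	st := cachekv.NewStore(mem) // first branch over the backing DB
	st2 := cachekv.NewStore(st) // branch of a branch

	st2.Set([]byte("k"), []byte("v"))
	fmt.Println(st.Get([]byte("k"))) // [] -> not yet visible one level down

	st2.Write() // flushes st2 into st only
	fmt.Println(string(st.Get([]byte("k")))) // v
	fmt.Println(mem.Get([]byte("k")))        // [] -> still not in the backing DB

	st.Write() // now flushes st into mem
	fmt.Println(string(mem.Get([]byte("k")))) // v
}
```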
-func TestCacheKVReverseIteratorBounds(t *testing.T) {
-	st := newCacheKVStore()
-
-	// set some items
-	nItems := 5
-	for i := 0; i < nItems; i++ {
-		st.Set(keyFmt(i), valFmt(i))
-	}
-
-	// iterate over all of them
-	itr := st.ReverseIterator(nil, nil)
-	i := 0
-	for ; itr.Valid(); itr.Next() {
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(nItems-1-i), k)
-		require.Equal(t, valFmt(nItems-1-i), v)
-		i++
-	}
-	require.Equal(t, nItems, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over none
-	itr = st.ReverseIterator(bz("money"), nil)
-	i = 0
-	for ; itr.Valid(); itr.Next() {
-		i++
-	}
-	require.Equal(t, 0, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over lower
-	end := 3
-	itr = st.ReverseIterator(keyFmt(0), keyFmt(end))
-	i = 0
-	for ; itr.Valid(); itr.Next() {
-		i++
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(end-i), k)
-		require.Equal(t, valFmt(end-i), v)
-	}
-	require.Equal(t, 3, i)
-	require.NoError(t, itr.Close())
-
-	// iterate over upper
-	end = 4
-	itr = st.ReverseIterator(keyFmt(2), keyFmt(end))
-	i = 0
-	for ; itr.Valid(); itr.Next() {
-		i++
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(end-i), k)
-		require.Equal(t, valFmt(end-i), v)
-	}
-	require.Equal(t, 2, i)
-	require.NoError(t, itr.Close())
-}
-
-func TestCacheKVMergeIteratorBasics(t *testing.T) {
-	st := newCacheKVStore()
-
-	// set and delete an item in the cache, iterator should be empty
-	k, v := keyFmt(0), valFmt(0)
-	st.Set(k, v)
-	st.Delete(k)
-	assertIterateDomain(t, st, 0)
-
-	// now set it and assert it's there
-	st.Set(k, v)
-	assertIterateDomain(t, st, 1)
-
-	// write it and assert it's there
-	st.Write()
-	assertIterateDomain(t, st, 1)
-
-	// remove it in cache and assert it's not
-	st.Delete(k)
-	assertIterateDomain(t, st, 0)
-
-	// write the delete and assert it's not there
-	st.Write()
-	assertIterateDomain(t, st, 0)
-
-	// add two keys and assert they're there
-	k1, v1 := keyFmt(1), valFmt(1)
-	st.Set(k, v)
-	st.Set(k1, v1)
-	assertIterateDomain(t, st, 2)
-
-	// write it and assert they're there
-	st.Write()
-	assertIterateDomain(t, st, 2)
-
-	// remove one in cache and assert it's not
-	st.Delete(k1)
-	assertIterateDomain(t, st, 1)
-
-	// write the delete and assert it's not there
-	st.Write()
-	assertIterateDomain(t, st, 1)
-
-	// delete the other key in cache and assert it's empty
-	st.Delete(k)
-	assertIterateDomain(t, st, 0)
-}
-
-func TestCacheKVMergeIteratorDeleteLast(t *testing.T) {
-	st := newCacheKVStore()
-
-	// set some items and write them
-	nItems := 5
-	for i := 0; i < nItems; i++ {
-		st.Set(keyFmt(i), valFmt(i))
-	}
-	st.Write()
-
-	// set some more items and leave dirty
-	for i := nItems; i < nItems*2; i++ {
-		st.Set(keyFmt(i), valFmt(i))
-	}
-
-	// iterate over all of them
-	assertIterateDomain(t, st, nItems*2)
-
-	// delete them all
-	for i := 0; i < nItems*2; i++ {
-		last := nItems*2 - 1 - i
-		st.Delete(keyFmt(last))
-		assertIterateDomain(t, st, last)
-	}
-}
-
-func TestCacheKVMergeIteratorDeletes(t *testing.T) {
-	st := newCacheKVStore()
-	truth := dbm.NewMemDB()
-
-	// set some items and write them
-	nItems := 10
-	for i := 0; i < nItems; i++ {
-		doOp(t, st, truth, opSet, i)
-	}
-	st.Write()
-
-	// delete every other item, starting from 0
-	for i := 0; i < nItems; i += 2 {
-		doOp(t, st, truth, opDel, i)
-		assertIterateDomainCompare(t, st, truth)
-	}
-
-	// reset
-	st = newCacheKVStore()
-	truth = dbm.NewMemDB()
-
-	// set some items and write them
-	for i := 0; i < nItems; i++ {
-		doOp(t, st, truth, opSet, i)
-	}
-	st.Write()
-
-	// delete every other item, starting from 1
-	for i := 1; i < nItems; i += 2 {
-		doOp(t, st, truth, opDel, i)
-		assertIterateDomainCompare(t, st, truth)
-	}
-}
-
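TestCacheKVMergeIteratorDeletes above is a differential test: the same operations are applied to the store under test and to a raw MemDB acting as ground truth, and the two are compared after every step. The skeleton of that pattern, reduced to plain maps purely for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// Differential testing in miniature: drive a system under test and a trivially
// correct reference model with the same random operation stream, then verify
// the observable states agree. The deleted tests do exactly this with a
// cachekv.Store against a MemDB.
func main() {
	sut := map[string][]byte{}   // stand-in for the store under test
	model := map[string][]byte{} // reference implementation

	r := rand.New(rand.NewSource(1)) // fixed seed so failures replay
	for i := 0; i < 1000; i++ {
		k := fmt.Sprintf("key%08d", r.Intn(50))
		if r.Intn(2) == 0 {
			v := []byte(fmt.Sprintf("value%08d", i))
			sut[k], model[k] = v, v
		} else {
			delete(sut, k)
			delete(model, k)
		}
	}

	if len(sut) != len(model) {
		panic("key-set size divergence")
	}
	for k, v := range model {
		if !bytes.Equal(sut[k], v) {
			panic("divergence at " + k)
		}
	}
	fmt.Println("states agree:", len(sut), "keys")
}
```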
-func TestCacheKVMergeIteratorChunks(t *testing.T) {
-	st := newCacheKVStore()
-
-	// Use the truth to check values on the merge iterator
-	truth := dbm.NewMemDB()
-
-	// sets to the parent
-	setRange(t, st, truth, 0, 20)
-	setRange(t, st, truth, 40, 60)
-	st.Write()
-
-	// sets to the cache
-	setRange(t, st, truth, 20, 40)
-	setRange(t, st, truth, 60, 80)
-	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}})
-
-	// remove some parents and some cache
-	deleteRange(t, st, truth, 15, 25)
-	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}})
-
-	// remove some parents and some cache
-	deleteRange(t, st, truth, 35, 45)
-	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {45, 80}})
-
-	// write, add more to the cache, and delete some cache
-	st.Write()
-	setRange(t, st, truth, 38, 42)
-	deleteRange(t, st, truth, 40, 43)
-	assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}})
-}
-
-func TestCacheKVMergeIteratorDomain(t *testing.T) {
-	st := newCacheKVStore()
-
-	itr := st.Iterator(nil, nil)
-	start, end := itr.Domain()
-	require.Equal(t, start, end)
-	require.NoError(t, itr.Close())
-
-	itr = st.Iterator(keyFmt(40), keyFmt(60))
-	start, end = itr.Domain()
-	require.Equal(t, keyFmt(40), start)
-	require.Equal(t, keyFmt(60), end)
-	require.NoError(t, itr.Close())
-
-	start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain()
-	require.Equal(t, keyFmt(0), start)
-	require.Equal(t, keyFmt(80), end)
-}
-
-func TestCacheKVMergeIteratorRandom(t *testing.T) {
-	st := newCacheKVStore()
-	truth := dbm.NewMemDB()
-
-	start, end := 25, 975
-	max := 1000
-	setRange(t, st, truth, start, end)
-
-	// do an op, test the iterator
-	for i := 0; i < 2000; i++ {
-		doRandomOp(t, st, truth, max)
-		assertIterateDomainCompare(t, st, truth)
-	}
-}
-
-func TestNilEndIterator(t *testing.T) {
-	const SIZE = 3000
-
-	tests := []struct {
-		name       string
-		write      bool
-		startIndex int
-		end        []byte
-	}{
-		{name: "write=false, end=nil", write: false, end: nil, startIndex: 1000},
-		{name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000},
-		{name: "write=true, end=nil", write: true, end: nil, startIndex: 1000},
-		{name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			st := newCacheKVStore()
-
-			for i := 0; i < SIZE; i++ {
-				kstr := keyFmt(i)
-				st.Set(kstr, valFmt(i))
-			}
-
-			if tt.write {
-				st.Write()
-			}
-
-			itr := st.Iterator(keyFmt(tt.startIndex), tt.end)
-			i := tt.startIndex
-			j := 0
-			for itr.Valid() {
-				require.Equal(t, keyFmt(i), itr.Key())
-				require.Equal(t, valFmt(i), itr.Value())
-				itr.Next()
-				i++
-				j++
-			}
-
-			require.Equal(t, SIZE-tt.startIndex, j)
-			require.NoError(t, itr.Close())
-		})
-	}
-}
-
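TestNilEndIterator above relies on the bound convention used throughout these stores: start is inclusive, end is exclusive, and a nil end means iterate to the end of the key space. A small usage sketch, again assuming the deleted packages remain importable:

```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/cachekv"
	"cosmossdk.io/store/dbadapter"
)

func main() {
	st := cachekv.NewStore(dbadapter.Store{DB: dbm.NewMemDB()})
	for i := 0; i < 5; i++ {
		st.Set([]byte(fmt.Sprintf("key%08d", i)), []byte{byte(i)})
	}

	// A nil end bound means "scan to the end of the key space";
	// the start bound is inclusive, a non-nil end would be exclusive.
	it := st.Iterator([]byte(fmt.Sprintf("key%08d", 2)), nil)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Println(string(it.Key()))
	}
	// key00000002, key00000003, key00000004
}
```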
-// TestIteratorDeadlock demonstrates the deadlock issue in the cache store.
-func TestIteratorDeadlock(t *testing.T) {
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	store := cachekv.NewStore(mem)
-	// the channel buffer is 64 and one element is received, so put at least 66 elements.
-	for i := 0; i < 66; i++ {
-		store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1})
-	}
-	it := store.Iterator(nil, nil)
-	defer it.Close()
-	store.Set([]byte("key20"), []byte{1})
-	// with the previous version this second iterator would block here, unless the lock on the btree is enabled.
-	it2 := store.Iterator(nil, nil)
-	defer it2.Close()
-}
-
-//-------------------------------------------------------------------------------------------
-// do some random ops
-
-const (
-	opSet      = 0
-	opSetRange = 1
-	opDel      = 2
-	opDelRange = 3
-	opWrite    = 4
-
-	totalOps = 5 // number of possible operations
-)
-
-func randInt(n int) int {
-	return unsafe.NewRand().Int() % n
-}
-
-// useful for replaying an error case if we find one
-func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) {
-	t.Helper()
-	switch op {
-	case opSet:
-		k := args[0]
-		st.Set(keyFmt(k), valFmt(k))
-		err := truth.Set(keyFmt(k), valFmt(k))
-		require.NoError(t, err)
-	case opSetRange:
-		start := args[0]
-		end := args[1]
-		setRange(t, st, truth, start, end)
-	case opDel:
-		k := args[0]
-		st.Delete(keyFmt(k))
-		err := truth.Delete(keyFmt(k))
-		require.NoError(t, err)
-	case opDelRange:
-		start := args[0]
-		end := args[1]
-		deleteRange(t, st, truth, start, end)
-	case opWrite:
-		st.Write()
-	}
-}
-
-func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) {
-	t.Helper()
-	r := randInt(totalOps)
-	switch r {
-	case opSet:
-		k := randInt(maxKey)
-		st.Set(keyFmt(k), valFmt(k))
-		err := truth.Set(keyFmt(k), valFmt(k))
-		require.NoError(t, err)
-	case opSetRange:
-		start := randInt(maxKey - 2)
-		end := randInt(maxKey-start) + start
-		setRange(t, st, truth, start, end)
-	case opDel:
-		k := randInt(maxKey)
-		st.Delete(keyFmt(k))
-		err := truth.Delete(keyFmt(k))
-		require.NoError(t, err)
-	case opDelRange:
-		start := randInt(maxKey - 2)
-		end := randInt(maxKey-start) + start
-		deleteRange(t, st, truth, start, end)
-	case opWrite:
-		st.Write()
-	}
-}
-
-//-------------------------------------------------------------------------------------------
-
-// iterate over whole domain
-func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) {
-	t.Helper()
-	itr := st.Iterator(nil, nil)
-	i := 0
-	for ; itr.Valid(); itr.Next() {
-		k, v := itr.Key(), itr.Value()
-		require.Equal(t, keyFmt(i), k)
-		require.Equal(t, valFmt(i), v)
-		i++
-	}
-	require.Equal(t, expectedN, i)
-	require.NoError(t, itr.Close())
-}
-
-func assertIterateDomainCheck(t *testing.T, st types.KVStore, mem dbm.DB, r []keyRange) {
-	t.Helper()
-	// iterate over each and check they match the other
-	itr := st.Iterator(nil, nil)
-	itr2, err := mem.Iterator(nil, nil) // ground truth
-	require.NoError(t, err)
-
-	krc := newKeyRangeCounter(r)
-	i := 0
-
-	for ; krc.valid(); krc.next() {
-		require.True(t, itr.Valid())
-		require.True(t, itr2.Valid())
-
-		// check the key/val matches the ground truth
-		k, v := itr.Key(), itr.Value()
-		k2, v2 := itr2.Key(), itr2.Value()
-		require.Equal(t, k, k2)
-		require.Equal(t, v, v2)
-
-		// check they match the counter
-		require.Equal(t, k, keyFmt(krc.key()))
-
-		itr.Next()
-		itr2.Next()
-		i++
-	}
-
-	require.False(t, itr.Valid())
-	require.False(t, itr2.Valid())
-	require.NoError(t, itr.Close())
-	require.NoError(t, itr2.Close())
-}
-
-func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) {
-	t.Helper()
-	// iterate over each and check they match the other
-	itr := st.Iterator(nil, nil)
-	itr2, err := mem.Iterator(nil, nil) // ground truth
-	require.NoError(t, err)
-	checkIterators(t, itr, itr2)
-	checkIterators(t, itr2, itr)
-	require.NoError(t, itr.Close())
-	require.NoError(t, itr2.Close())
-}
-
-func checkIterators(t *testing.T, itr, itr2 types.Iterator) {
-	t.Helper()
-	for ; itr.Valid(); itr.Next() {
-		require.True(t, itr2.Valid())
-		k, v := 
itr.Key(), itr.Value() - k2, v2 := itr2.Key(), itr2.Value() - require.Equal(t, k, k2) - require.Equal(t, v, v2) - itr2.Next() - } - require.False(t, itr.Valid()) - require.False(t, itr2.Valid()) -} - -//-------------------------------------------------------- - -func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - t.Helper() - for i := start; i < end; i++ { - st.Set(keyFmt(i), valFmt(i)) - err := mem.Set(keyFmt(i), valFmt(i)) - require.NoError(t, err) - } -} - -func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - t.Helper() - for i := start; i < end; i++ { - st.Delete(keyFmt(i)) - err := mem.Delete(keyFmt(i)) - require.NoError(t, err) - } -} - -//-------------------------------------------------------- - -type keyRange struct { - start int - end int -} - -func (kr keyRange) len() int { - return kr.end - kr.start -} - -func newKeyRangeCounter(kr []keyRange) *keyRangeCounter { - return &keyRangeCounter{keyRanges: kr} -} - -// we can iterate over this and make sure our real iterators have all the right keys -type keyRangeCounter struct { - rangeIdx int - idx int - keyRanges []keyRange -} - -func (krc *keyRangeCounter) valid() bool { - maxRangeIdx := len(krc.keyRanges) - 1 - maxRange := krc.keyRanges[maxRangeIdx] - - // if we're not in the max range, we're valid - if krc.rangeIdx <= maxRangeIdx && - krc.idx < maxRange.len() { - return true - } - - return false -} - -func (krc *keyRangeCounter) next() { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - if krc.idx == thisKeyRange.len()-1 { - krc.rangeIdx++ - krc.idx = 0 - } else { - krc.idx++ - } -} - -func (krc *keyRangeCounter) key() int { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - return thisKeyRange.start + krc.idx -} - -//-------------------------------------------------------- - -func bz(s string) []byte { return []byte(s) } - -func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} - -func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - for i := 0; i < b.N; i++ { - arr := []byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)} - st.Set(arr, arr) - } - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go deleted file mode 100644 index 696911370c..0000000000 --- a/store/cachemulti/store.go +++ /dev/null @@ -1,170 +0,0 @@ -package cachemulti - -import ( - "fmt" - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// storeNameCtxKey is the TraceContext metadata key that identifies -// the store which emitted a given trace. -const storeNameCtxKey = "store_name" - -//---------------------------------------- -// Store - -// Store holds many branched stores. -// Implements MultiStore. -// NOTE: a Store (and MultiStores in general) should never expose the -// keys for the substores. 
-type Store struct { - db types.CacheKVStore - stores map[types.StoreKey]types.CacheWrap - keys map[string]types.StoreKey - - traceWriter io.Writer - traceContext types.TraceContext -} - -var _ types.CacheMultiStore = Store{} - -// NewFromKVStore creates a new Store object from a mapping of store keys to -// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store -// is a branched store. -func NewFromKVStore( - store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, - keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, -) Store { - cms := Store{ - db: cachekv.NewStore(store), - stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), - keys: keys, - traceWriter: traceWriter, - traceContext: traceContext, - } - - for key, store := range stores { - if cms.TracingEnabled() { - tctx := cms.traceContext.Clone().Merge(types.TraceContext{ - storeNameCtxKey: key.Name(), - }) - - store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) - } - cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) - } - - return cms -} - -// NewStore creates a new Store object from a mapping of store keys to -// CacheWrapper objects. Each CacheWrapper store is a branched store. -func NewStore( - db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, - traceWriter io.Writer, traceContext types.TraceContext, -) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) -} - -func newCacheMultiStoreFromCMS(cms Store) Store { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range cms.stores { - stores[k] = v - } - - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) -} - -// SetTracer sets the tracer for the MultiStore that the underlying -// stores will utilize to trace operations. A MultiStore is returned. -func (cms Store) SetTracer(w io.Writer) types.MultiStore { - cms.traceWriter = w - return cms -} - -// SetTracingContext updates the tracing context for the MultiStore by merging -// the given context with the existing context by key. Any existing keys will -// be overwritten. It is implied that the caller should update the context when -// necessary between tracing operations. It returns a modified MultiStore. -func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore { - if cms.traceContext != nil { - for k, v := range tc { - cms.traceContext[k] = v - } - } else { - cms.traceContext = tc - } - - return cms -} - -// TracingEnabled returns if tracing is enabled for the MultiStore. -func (cms Store) TracingEnabled() bool { - return cms.traceWriter != nil -} - -// LatestVersion returns the branch version of the store -func (cms Store) LatestVersion() int64 { - panic("cannot get latest version from branch cached multi-store") -} - -// GetStoreType returns the type of the store. -func (cms Store) GetStoreType() types.StoreType { - return types.StoreTypeMulti -} - -// Write calls Write on each underlying store. -func (cms Store) Write() { - cms.db.Write() - for _, store := range cms.stores { - store.Write() - } -} - -// Implements CacheWrapper. -func (cms Store) CacheWrap() types.CacheWrap { - return cms.CacheMultiStore().(types.CacheWrap) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return cms.CacheWrap() -} - -// Implements MultiStore. 
-func (cms Store) CacheMultiStore() types.CacheMultiStore { - return newCacheMultiStoreFromCMS(cms) -} - -// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic -// as an already cached multi-store cannot load previous versions. -// -// TODO: The store implementation can possibly be modified to support this as it -// seems safe to load previous versions (heights). -func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { - panic("cannot branch cached multi-store with a version") -} - -// GetStore returns an underlying Store by key. -func (cms Store) GetStore(key types.StoreKey) types.Store { - s := cms.stores[key] - if key == nil || s == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return s.(types.Store) -} - -// GetKVStore returns an underlying KVStore by key. -func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { - store := cms.stores[key] - if key == nil || store == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return store.(types.KVStore) -} diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go deleted file mode 100644 index 0ea7785bff..0000000000 --- a/store/cachemulti/store_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package cachemulti - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestStoreGetKVStore(t *testing.T) { - require := require.New(t) - - s := Store{stores: map[types.StoreKey]types.CacheWrap{}} - key := types.NewKVStoreKey("abc") - errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key) - - require.PanicsWithValue(errMsg, - func() { s.GetStore(key) }) - - require.PanicsWithValue(errMsg, - func() { s.GetKVStore(key) }) -} diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go deleted file mode 100644 index d69e4ebf13..0000000000 --- a/store/dbadapter/store.go +++ /dev/null @@ -1,90 +0,0 @@ -package dbadapter - -import ( - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -// Store is wrapper type for dbm.Db with implementation of KVStore -type Store struct { - dbm.DB -} - -// Get wraps the underlying DB's Get method panicking on error. -func (dsa Store) Get(key []byte) []byte { - v, err := dsa.DB.Get(key) - if err != nil { - panic(err) - } - - return v -} - -// Has wraps the underlying DB's Has method panicking on error. -func (dsa Store) Has(key []byte) bool { - ok, err := dsa.DB.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -// Set wraps the underlying DB's Set method panicking on error. -func (dsa Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - if err := dsa.DB.Set(key, value); err != nil { - panic(err) - } -} - -// Delete wraps the underlying DB's Delete method panicking on error. -func (dsa Store) Delete(key []byte) { - if err := dsa.DB.Delete(key); err != nil { - panic(err) - } -} - -// Iterator wraps the underlying DB's Iterator method panicking on error. -func (dsa Store) Iterator(start, end []byte) types.Iterator { - iter, err := dsa.DB.Iterator(start, end) - if err != nil { - panic(err) - } - - return iter -} - -// ReverseIterator wraps the underlying DB's ReverseIterator method panicking on error. 
-func (dsa Store) ReverseIterator(start, end []byte) types.Iterator { - iter, err := dsa.DB.ReverseIterator(start, end) - if err != nil { - panic(err) - } - - return iter -} - -// GetStoreType returns the type of the store. -func (Store) GetStoreType() types.StoreType { - return types.StoreTypeDB -} - -// CacheWrap branches the underlying store. -func (dsa Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(dsa) -} - -// CacheWrapWithTrace implements KVStore. -func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) -} - -// dbm.DB implements KVStore so we can CacheKVStore it. -var _ types.KVStore = Store{} diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go deleted file mode 100644 index 9685887f91..0000000000 --- a/store/dbadapter/store_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package dbadapter_test - -import ( - "bytes" - "errors" - "testing" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/mock" - "cosmossdk.io/store/types" -) - -var errFoo = errors.New("dummy") - -func TestAccessors(t *testing.T) { - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockDB := mock.NewMockDB(mockCtrl) - store := dbadapter.Store{mockDB} - key := []byte("test") - value := []byte("testvalue") - - require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Equal(t, types.StoreTypeDB, store.GetStoreType()) - store.GetStoreType() - - retFoo := []byte("xxx") - mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil) - require.True(t, bytes.Equal(retFoo, store.Get(key))) - - mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.Get(key) }) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil) - require.True(t, store.Has(key)) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil) - require.False(t, store.Has(key)) - - mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo) - require.Panics(t, func() { store.Has(key) }) - - mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil) - require.NotPanics(t, func() { store.Set(key, value) }) - - mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo) - require.Panics(t, func() { store.Set(key, value) }) - - mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil) - require.NotPanics(t, func() { store.Delete(key) }) - - mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo) - require.Panics(t, func() { store.Delete(key) }) - - start, end := []byte("start"), []byte("end") - mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) - require.NotPanics(t, func() { store.Iterator(start, end) }) - - mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.Iterator(start, end) }) - - mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil) - require.NotPanics(t, func() { store.ReverseIterator(start, end) }) - - mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo) - require.Panics(t, func() { store.ReverseIterator(start, end) }) -} - -func TestCacheWraps(t *testing.T) 
{ - mockCtrl := gomock.NewController(t) - mockDB := mock.NewMockDB(mockCtrl) - store := dbadapter.Store{mockDB} - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} diff --git a/store/gaskv/store.go b/store/gaskv/store.go deleted file mode 100644 index 41242d4928..0000000000 --- a/store/gaskv/store.go +++ /dev/null @@ -1,176 +0,0 @@ -package gaskv - -import ( - "io" - - "cosmossdk.io/store/types" -) - -var _ types.KVStore = &Store{} - -// Store applies gas tracking to an underlying KVStore. It implements the -// KVStore interface. -type Store struct { - gasMeter types.GasMeter - gasConfig types.GasConfig - parent types.KVStore -} - -// NewStore returns a reference to a new GasKVStore. -func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { - kvs := &Store{ - gasMeter: gasMeter, - gasConfig: gasConfig, - parent: parent, - } - return kvs -} - -// GetStoreType implements Store. -func (gs *Store) GetStoreType() types.StoreType { - return gs.parent.GetStoreType() -} - -// Get implements KVStore. -func (gs *Store) Get(key []byte) (value []byte) { - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) - value = gs.parent.Get(key) - - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) - - return value -} - -// Set implements KVStore. -func (gs *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) - // TODO overflow-safe math? - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) - gs.parent.Set(key, value) -} - -// Has implements KVStore. -func (gs *Store) Has(key []byte) bool { - gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc) - return gs.parent.Has(key) -} - -// Delete implements KVStore. -func (gs *Store) Delete(key []byte) { - // charge gas to prevent certain attack vectors even though space is being freed - gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc) - gs.parent.Delete(key) -} - -// Iterator implements the KVStore interface. It returns an iterator which -// incurs a flat gas cost for seeking to the first key/value pair and a variable -// gas cost based on the current value's length if the iterator is valid. -func (gs *Store) Iterator(start, end []byte) types.Iterator { - return gs.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It returns a reverse -// iterator which incurs a flat gas cost for seeking to the first key/value pair -// and a variable gas cost based on the current value's length if the iterator -// is valid. -func (gs *Store) ReverseIterator(start, end []byte) types.Iterator { - return gs.iterator(start, end, false) -} - -// CacheWrap implements KVStore. -func (gs *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a GasKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. 
-func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
-	panic("cannot CacheWrapWithTrace a GasKVStore")
-}
-
-func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator {
-	var parent types.Iterator
-	if ascending {
-		parent = gs.parent.Iterator(start, end)
-	} else {
-		parent = gs.parent.ReverseIterator(start, end)
-	}
-
-	gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent)
-	gi.(*gasIterator).consumeSeekGas()
-
-	return gi
-}
-
-type gasIterator struct {
-	gasMeter  types.GasMeter
-	gasConfig types.GasConfig
-	parent    types.Iterator
-}
-
-func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator {
-	return &gasIterator{
-		gasMeter:  gasMeter,
-		gasConfig: gasConfig,
-		parent:    parent,
-	}
-}
-
-// Domain implements Iterator.
-func (gi *gasIterator) Domain() (start, end []byte) {
-	return gi.parent.Domain()
-}
-
-// Valid implements Iterator.
-func (gi *gasIterator) Valid() bool {
-	return gi.parent.Valid()
-}
-
-// Next implements the Iterator interface. It seeks to the next key/value pair
-// in the iterator. It incurs a flat gas cost for seeking and a variable gas
-// cost based on the current value's length if the iterator is valid.
-func (gi *gasIterator) Next() {
-	gi.consumeSeekGas()
-	gi.parent.Next()
-}
-
-// Key implements the Iterator interface. It returns the current key and it does
-// not incur any gas cost.
-func (gi *gasIterator) Key() (key []byte) {
-	key = gi.parent.Key()
-	return key
-}
-
-// Value implements the Iterator interface. It returns the current value and it
-// does not incur any gas cost.
-func (gi *gasIterator) Value() (value []byte) {
-	value = gi.parent.Value()
-	return value
-}
-
-// Close implements Iterator.
-func (gi *gasIterator) Close() error {
-	return gi.parent.Close()
-}
-
-// Error delegates the Error call to the parent iterator.
-func (gi *gasIterator) Error() error {
-	return gi.parent.Error()
-}
-
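The consumeSeekGas hunk just below charges, per iterator step, a per-byte read cost for the current key and value plus a flat next-step cost. A back-of-the-envelope model of that arithmetic follows; the constants are illustrative stand-ins, not the SDK's actual KVGasConfig values:

```go
package main

import "fmt"

// Illustrative constants only; the real per-byte and flat costs come from
// the store's types.GasConfig.
const (
	readCostPerByte  = 3
	iterNextCostFlat = 30
)

// seekGas models consumeSeekGas: per-byte read cost for the current key and
// value, plus a flat cost for advancing the iterator.
func seekGas(key, value []byte) uint64 {
	gas := readCostPerByte * uint64(len(key))
	gas += readCostPerByte * uint64(len(value))
	gas += iterNextCostFlat
	return gas
}

func main() {
	// e.g. an 11-byte key ("key00000001") and a 13-byte value ("value00000001"):
	fmt.Println(seekGas([]byte("key00000001"), []byte("value00000001")))
	// 3*11 + 3*13 + 30 = 102
}
```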
-// consumeSeekGas consumes on each iteration step a flat gas cost and a variable gas cost
-// based on the current key's and value's lengths.
-func (gi *gasIterator) consumeSeekGas() {
-	if gi.Valid() {
-		key := gi.Key()
-		value := gi.Value()
-
-		gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc)
-		gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc)
-	}
-	gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc)
-}
diff --git a/store/gaskv/store_test.go b/store/gaskv/store_test.go
deleted file mode 100644
index 354832d17c..0000000000
--- a/store/gaskv/store_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package gaskv_test
-
-import (
-	"fmt"
-	"testing"
-
-	dbm "github.com/cosmos/cosmos-db"
-	"github.com/stretchr/testify/require"
-
-	"cosmossdk.io/store/dbadapter"
-	"cosmossdk.io/store/gaskv"
-	"cosmossdk.io/store/types"
-)
-
-func bz(s string) []byte { return []byte(s) }
-
-func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
-func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
-
-func TestGasKVStoreBasic(t *testing.T) {
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	meter := types.NewGasMeter(10000)
-	st := gaskv.NewStore(mem, meter, types.KVGasConfig())
-
-	require.Equal(t, types.StoreTypeDB, st.GetStoreType())
-	require.Panics(t, func() { st.CacheWrap() })
-	require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) })
-
-	require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
-	require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
-
-	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
-	st.Set(keyFmt(1), valFmt(1))
-	require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
-	st.Delete(keyFmt(1))
-	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
-	require.Equal(t, meter.GasConsumed(), types.Gas(6858))
-}
-
-func TestGasKVStoreIterator(t *testing.T) {
-	mem := dbadapter.Store{DB: dbm.NewMemDB()}
-	meter := types.NewGasMeter(100000)
-	st := gaskv.NewStore(mem, meter, types.KVGasConfig())
-	require.False(t, st.Has(keyFmt(1)))
-	require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
-	require.Empty(t, st.Get(keyFmt(2)), "Expected `key2` to be empty")
-	require.Empty(t, st.Get(keyFmt(3)), "Expected `key3` to be empty")
-
-	st.Set(keyFmt(1), valFmt(1))
-	require.True(t, st.Has(keyFmt(1)))
-	st.Set(keyFmt(2), valFmt(2))
-	require.True(t, st.Has(keyFmt(2)))
-	st.Set(keyFmt(3), valFmt(0))
-
-	iterator := st.Iterator(nil, nil)
-	start, end := iterator.Domain()
-	require.Nil(t, start)
-	require.Nil(t, end)
-	require.NoError(t, iterator.Error())
-
-	t.Cleanup(func() {
-		if err := iterator.Close(); err != nil {
-			t.Fatal(err)
-		}
-	})
-	ka := iterator.Key()
-	require.Equal(t, ka, keyFmt(1))
-	va := iterator.Value()
-	require.Equal(t, va, valFmt(1))
-	iterator.Next()
-	kb := iterator.Key()
-	require.Equal(t, kb, keyFmt(2))
-	vb := iterator.Value()
-	require.Equal(t, vb, valFmt(2))
-	iterator.Next()
-	require.Equal(t, types.Gas(14565), meter.GasConsumed())
-	kc := iterator.Key()
-	require.Equal(t, kc, keyFmt(3))
-	vc := iterator.Value()
-	require.Equal(t, vc, valFmt(0))
-	iterator.Next()
-	require.Equal(t, types.Gas(14667), meter.GasConsumed())
-	require.False(t, iterator.Valid())
-	require.Panics(t, iterator.Next)
-	require.Equal(t, types.Gas(14697), meter.GasConsumed())
-	require.NoError(t, iterator.Error())
-
-	reverseIterator := st.ReverseIterator(nil, nil)
-	t.Cleanup(func() {
-		if err := reverseIterator.Close(); err != nil {
-			t.Fatal(err)
-		}
-	})
- 
require.Equal(t, reverseIterator.Key(), keyFmt(3)) - reverseIterator.Next() - require.Equal(t, reverseIterator.Key(), keyFmt(2)) - reverseIterator.Next() - require.Equal(t, reverseIterator.Key(), keyFmt(1)) - reverseIterator.Next() - require.False(t, reverseIterator.Valid()) - require.Panics(t, reverseIterator.Next) - require.Equal(t, types.Gas(15135), meter.GasConsumed()) -} - -func TestGasKVStoreOutOfGasSet(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(0) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - require.Panics(t, func() { st.Set(keyFmt(1), valFmt(1)) }, "Expected out-of-gas") -} - -func TestGasKVStoreOutOfGasIterator(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - meter := types.NewGasMeter(20000) - st := gaskv.NewStore(mem, meter, types.KVGasConfig()) - st.Set(keyFmt(1), valFmt(1)) - iterator := st.Iterator(nil, nil) - iterator.Next() - require.Panics(t, func() { iterator.Value() }, "Expected out-of-gas") -} diff --git a/store/go.mod b/store/go.mod deleted file mode 100644 index a5c21ccbc9..0000000000 --- a/store/go.mod +++ /dev/null @@ -1,77 +0,0 @@ -module cosmossdk.io/store - -go 1.22.2 - -require ( - cosmossdk.io/errors v1.0.1 - cosmossdk.io/log v1.3.1 - cosmossdk.io/math v1.3.0 - github.com/cometbft/cometbft v1.0.0-rc1 - github.com/cometbft/cometbft/api v1.0.0-rc.1 - github.com/cosmos/cosmos-db v1.0.2 - github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/gogoproto v1.5.0 - github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179 - github.com/cosmos/ics23/go v0.10.0 - github.com/golang/mock v1.6.0 - github.com/hashicorp/go-hclog v1.6.3 - github.com/hashicorp/go-metrics v0.5.3 - github.com/hashicorp/go-plugin v1.6.1 - github.com/hashicorp/golang-lru v1.0.2 - github.com/stretchr/testify v1.9.0 - github.com/tidwall/btree v1.7.0 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 - google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.34.2 - gotest.tools/v3 v3.5.1 -) - -require ( - github.com/DataDog/zstd v1.5.5 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.0 // indirect - github.com/cockroachdb/redact v1.1.5 // indirect - github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/dot v1.6.2 // indirect - github.com/fatih/color v1.17.0 // indirect - github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/jhump/protoreflect v1.15.3 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/linxGnu/grocksdb v1.8.14 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 
// indirect - github.com/oklog/run v1.1.0 // indirect - github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/zerolog v1.33.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/store/go.sum b/store/go.sum deleted file mode 100644 index 8f0c16d05b..0000000000 --- a/store/go.sum +++ /dev/null @@ -1,341 +0,0 @@ -cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= -cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= -cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= -cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= -cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= -cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven 
v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/cometbft/cometbft v1.0.0-rc1 h1:pYCXw0rKILceyOzHwd+/fGLag8VYemwLUIX6N7V2REw= -github.com/cometbft/cometbft v1.0.0-rc1/go.mod h1:64cB2wvltmK5plHlJFLYOZYGsaTKNW2EZgcHBisHP7o= -github.com/cometbft/cometbft/api v1.0.0-rc.1 h1:GtdXwDGlqwHYs16A4egjwylfYOMYyEacLBrs3Zvpt7g= -github.com/cometbft/cometbft/api v1.0.0-rc.1/go.mod h1:NDFKiBBD8HJC6QQLAoUI99YhsiRZtg2+FJWfk6A6m6o= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= -github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= -github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= -github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= -github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= -github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179 h1:wmwDn7V3RodN9auB3FooSQxs46nHVE3u0mb87TJkZFE= -github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= -github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= -github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= -github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= -github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= -github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= -github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= -github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= -github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d 
h1:htwtWgtQo8YS6JFWWi2DNgY0RwSGJ1ruMoxY6CUUclk= -github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= -github.com/sasha-s/go-deadlock 
v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= -github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/store/iavl/store.go b/store/iavl/store.go deleted file mode 100644 index 198948b300..0000000000 --- a/store/iavl/store.go +++ /dev/null @@ -1,426 +0,0 @@ -package iavl - -import ( - "errors" - "fmt" - "io" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - ics23 "github.com/cosmos/ics23/go" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" - "cosmossdk.io/store/wrapper" -) - -const ( - DefaultIAVLCacheSize = 500000 -) - -var ( - _ types.KVStore = (*Store)(nil) - _ types.CommitStore = (*Store)(nil) - _ types.CommitKVStore = (*Store)(nil) - _ types.Queryable = (*Store)(nil) - _ types.StoreWithInitialVersion = (*Store)(nil) - _ types.PausablePruner = (*Store)(nil) -) - -// Store Implements types.KVStore and CommitKVStore. -type Store struct { - tree Tree - logger types.Logger - metrics metrics.StoreMetrics -} - -// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the -// store's version (id) from the provided DB. An error is returned if the version -// fails to load, or if called with a positive version on an empty tree. 
-func LoadStore(db dbm.DB, logger types.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { - return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics) -} - -// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion -// to the one given. Internally, it will load the store's version (id) from the -// provided DB. An error is returned if the version fails to load, or if called with a positive -// version on an empty tree. -func LoadStoreWithInitialVersion(db dbm.DB, logger types.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) { - tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, disableFastNode, logger, iavl.InitialVersionOption(initialVersion), iavl.AsyncPruningOption(true)) - - isUpgradeable, err := tree.IsUpgradeable() - if err != nil { - return nil, err - } - - if isUpgradeable && logger != nil { - logger.Info( - "Upgrading IAVL storage for faster queries + execution on live state. This may take a while", - "store_key", key.String(), - "version", initialVersion, - "commit", fmt.Sprintf("%X", id), - ) - } - - _, err = tree.LoadVersion(id.Version) - if err != nil { - return nil, err - } - - if logger != nil { - logger.Debug("Finished loading IAVL tree") - } - - return &Store{ - tree: tree, - logger: logger, - metrics: metrics, - }, nil -} - -// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable -// IAVL tree reference. It should only be used for testing purposes. -// -// CONTRACT: The IAVL tree should be fully loaded. -// CONTRACT: PruningOptions passed in as argument must be the same as pruning options -// passed into iavl.MutableTree -func UnsafeNewStore(tree *iavl.MutableTree) *Store { - return &Store{ - tree: tree, - metrics: metrics.NewNoOpMetrics(), - } -} - -// GetImmutable returns a reference to a new store backed by an immutable IAVL -// tree at a specific version (height) without any pruning options. This should -// be used for querying and iteration only. If the version does not exist or has -// been pruned, an empty immutable IAVL tree will be used. -// Any mutable operations executed will result in a panic. -func (st *Store) GetImmutable(version int64) (*Store, error) { - if !st.VersionExists(version) { - return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height") - } - - iTree, err := st.tree.GetImmutable(version) - if err != nil { - return nil, err - } - - return &Store{ - tree: &immutableTree{iTree}, - metrics: st.metrics, - }, nil -} - -// Commit commits the current store state and returns a CommitID with the new -// version and hash. -func (st *Store) Commit() types.CommitID { - defer st.metrics.MeasureSince("store", "iavl", "commit") - - hash, version, err := st.tree.SaveVersion() - if err != nil { - panic(err) - } - - return types.CommitID{ - Version: version, - Hash: hash, - } -} - -// WorkingHash returns the hash of the current working tree. -func (st *Store) WorkingHash() []byte { - return st.tree.WorkingHash() -} - -// LastCommitID implements Committer. -func (st *Store) LastCommitID() types.CommitID { - return types.CommitID{ - Version: st.tree.Version(), - Hash: st.tree.Hash(), - } -} - -// PausePruning implements CommitKVStore interface. 
-func (st *Store) PausePruning(pause bool) { - if pause { - st.tree.SetCommitting() - } else { - st.tree.UnsetCommitting() - } -} - -// SetPruning panics as pruning options should be provided at initialization -// since IAVL accepts pruning options directly. -func (st *Store) SetPruning(_ pruningtypes.PruningOptions) { - panic("cannot set pruning options on an initialized IAVL store") -} - -// GetPruning panics as pruning options should be provided at initialization -// since IAVL accepts pruning options directly. -func (st *Store) GetPruning() pruningtypes.PruningOptions { - panic("cannot get pruning options on an initialized IAVL store") -} - -// VersionExists returns whether a given version is stored. -func (st *Store) VersionExists(version int64) bool { - return st.tree.VersionExists(version) -} - -// GetAllVersions returns all versions in the IAVL tree. -func (st *Store) GetAllVersions() []int { - return st.tree.AvailableVersions() -} - -// GetStoreType implements Store. -func (st *Store) GetStoreType() types.StoreType { - return types.StoreTypeIAVL -} - -// CacheWrap implements Store. -func (st *Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(st) -} - -// CacheWrapWithTrace implements the Store interface. -func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(st, w, tc)) -} - -// Set implements types.KVStore. -func (st *Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - _, err := st.tree.Set(key, value) - if err != nil && st.logger != nil { - st.logger.Error("iavl set error", "error", err.Error()) - } -} - -// Get implements types.KVStore. -func (st *Store) Get(key []byte) []byte { - defer st.metrics.MeasureSince("store", "iavl", "get") - value, err := st.tree.Get(key) - if err != nil { - panic(err) - } - return value -} - -// Has implements types.KVStore. -func (st *Store) Has(key []byte) (exists bool) { - defer st.metrics.MeasureSince("store", "iavl", "has") - has, err := st.tree.Has(key) - if err != nil { - panic(err) - } - return has -} - -// Delete implements types.KVStore. -func (st *Store) Delete(key []byte) { - defer st.metrics.MeasureSince("store", "iavl", "delete") - _, _, err := st.tree.Remove(key) - if err != nil { - panic(err) - } -} - -// DeleteVersionsTo deletes versions up to the given version from the MutableTree. An error -// is returned if any single version is invalid or the delete fails. All writes -// happen in a single batch with a single commit. -func (st *Store) DeleteVersionsTo(version int64) error { - return st.tree.DeleteVersionsTo(version) -} - -// LoadVersionForOverwriting attempts to load a tree at a previously committed -// version. Any versions greater than targetVersion will be deleted. -func (st *Store) LoadVersionForOverwriting(targetVersion int64) error { - return st.tree.LoadVersionForOverwriting(targetVersion) -} - -// Iterator implements types.KVStore. -func (st *Store) Iterator(start, end []byte) types.Iterator { - iterator, err := st.tree.Iterator(start, end, true) - if err != nil { - panic(err) - } - return iterator -} - -// ReverseIterator implements types.KVStore. -func (st *Store) ReverseIterator(start, end []byte) types.Iterator { - iterator, err := st.tree.Iterator(start, end, false) - if err != nil { - panic(err) - } - return iterator -} - -// SetInitialVersion sets the initial version of the IAVL tree. It is used when -// starting a new chain at an arbitrary height.
-func (st *Store) SetInitialVersion(version int64) { - st.tree.SetInitialVersion(uint64(version)) -} - -// Export exports the IAVL store at the given version, returning an iavl.Exporter for the tree. -func (st *Store) Export(version int64) (*iavl.Exporter, error) { - istore, err := st.GetImmutable(version) - if err != nil { - return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version) - } - tree, ok := istore.tree.(*immutableTree) - if !ok || tree == nil { - return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version) - } - return tree.Export() -} - -// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing. -func (st *Store) Import(version int64) (*iavl.Importer, error) { - tree, ok := st.tree.(*iavl.MutableTree) - if !ok { - return nil, errors.New("iavl import failed: unable to find mutable tree") - } - return tree.Import(version) -} - -// getHeight returns the height to query at: req.Height if non-zero, otherwise -// the latest height, preferring latest-1 when that version still exists. -func getHeight(tree Tree, req *types.RequestQuery) int64 { - height := req.Height - if height == 0 { - latest := tree.Version() - if tree.VersionExists(latest - 1) { - height = latest - 1 - } else { - height = latest - } - } - return height -} - -// Query implements the ABCI Query interface. - // -// By default we will return from (latest height -1), -// as we will have merkle proofs immediately (header height = data height + 1). -// If latest-1 is not present, use latest (which must be present). -// If you want the latest data (e.g. to see a tx result), you must -// explicitly set the height you want to query. -func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) { - defer st.metrics.MeasureSince("store", "iavl", "query") - - if len(req.Data) == 0 { - return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length") - } - - tree := st.tree - - // store the height we chose in the response, with 0 being changed to the - // latest height - res = &types.ResponseQuery{ - Height: getHeight(tree, req), - } - - switch req.Path { - case "/key": // get by key - key := req.Data // data holds the key bytes - - res.Key = key - if !st.VersionExists(res.Height) { - res.Log = iavl.ErrVersionDoesNotExist.Error() - break - } - - value, err := tree.GetVersioned(key, res.Height) - if err != nil { - panic(err) - } - res.Value = value - - if !req.Prove { - break - } - - // Continue to prove existence/absence of value - // Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof - iTree, err := tree.GetImmutable(res.Height) - if err != nil { - // sanity check: If value for given version was retrieved, immutable tree must also be retrievable - panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error())) - } - mtree := &iavl.MutableTree{ - ImmutableTree: iTree, - } - - // get proof from tree and convert to merkle.Proof before adding to result - res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil) - - case "/subspace": - pairs := kv.Pairs{ - Pairs: make([]kv.Pair, 0), - } - - subspace := req.Data - res.Key = subspace - - iterator := types.KVStorePrefixIterator(st, subspace) - for ; iterator.Valid(); iterator.Next() { - pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()}) - } - if err := iterator.Close(); err != nil { - panic(fmt.Errorf("failed to close iterator: %w", err)) - } - - bz, err := pairs.Marshal() - if err != nil { -
panic(fmt.Errorf("failed to marshal KV pairs: %w", err)) - } - - res.Value = bz - - default: - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path) - } - - return res, err -} - -// TraverseStateChanges traverses the state changes between two versions and calls the given function. -func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error { - return st.tree.TraverseStateChanges(startVersion, endVersion, fn) -} - -// getProofFromTree takes a MutableTree, a key, and a flag for creating an existence or absence proof, and returns the -// appropriate merkle.Proof. Since this must be called after querying for the value, this function should never error. -// Thus, it panics on error rather than returning it -func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps { - var ( - commitmentProof *ics23.CommitmentProof - err error - ) - - if exists { - // value was found - commitmentProof, err = tree.GetMembershipProof(key) - if err != nil { - // sanity check: If value was found, membership proof must be creatable - panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error())) - } - } else { - // value wasn't found - commitmentProof, err = tree.GetNonMembershipProof(key) - if err != nil { - // sanity check: If value wasn't found, nonmembership proof must be creatable - panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error())) - } - } - - op := types.NewIavlCommitmentOp(key, commitmentProof) - return &cmtprotocrypto.ProofOps{Ops: []cmtprotocrypto.ProofOp{op.ProofOp()}} -} diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go deleted file mode 100644 index 7ad24d7fe3..0000000000 --- a/store/iavl/store_test.go +++ /dev/null @@ -1,714 +0,0 @@ -package iavl - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "math" - "sort" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" - "cosmossdk.io/store/wrapper" -) - -var ( - cacheSize = 100 - treeData = map[string]string{ - "hello": "goodbye", - "aloha": "shalom", - } - nMoreData = 0 -) - -func randBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, _ = crand.Read(b) - return b -} - -// make a tree with data from above and save it -func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) { - t.Helper() - tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger()) - - for k, v := range treeData { - _, err := tree.Set([]byte(k), []byte(v)) - require.NoError(t, err) - } - - for i := 0; i < nMoreData; i++ { - key := randBytes(12) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - - hash, ver, err := tree.SaveVersion() - require.Nil(t, err) - - return tree, types.CommitID{Version: ver, Hash: hash} -} - -func TestLoadStore(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - // Create non-pruned height H - updated, err := tree.Set([]byte("hello"), []byte("hallo")) - require.NoError(t, err) - require.True(t, updated) - hash, verH, err := tree.SaveVersion() - cIDH := types.CommitID{Version: verH, Hash: hash} - require.Nil(t, err) - - // Create pruned height Hp - updated, err = tree.Set([]byte("hello"),
[]byte("hola")) - require.NoError(t, err) - require.True(t, updated) - hash, verHp, err := tree.SaveVersion() - cIDHp := types.CommitID{Version: verHp, Hash: hash} - require.Nil(t, err) - - // TODO: Prune this height - - // Create current height Hc - updated, err = tree.Set([]byte("hello"), []byte("ciao")) - require.NoError(t, err) - require.True(t, updated) - hash, verHc, err := tree.SaveVersion() - cIDHc := types.CommitID{Version: verHc, Hash: hash} - require.Nil(t, err) - - // Querying an existing store at some previous non-pruned height H - hStore, err := store.GetImmutable(verH) - require.NoError(t, err) - require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo") - - // Querying an existing store at some previous pruned height Hp - hpStore, err := store.GetImmutable(verHp) - require.NoError(t, err) - require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola") - - // Querying an existing store at current height Hc - hcStore, err := store.GetImmutable(verHc) - require.NoError(t, err) - require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao") - - // Querying a new store at some previous non-pruned height H - newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo") - - // Querying a new store at some previous pruned height Hp - newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola") - - // Querying a new store at current height H - newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao") -} - -func TestGetImmutable(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - updated, err := tree.Set([]byte("hello"), []byte("adios")) - require.NoError(t, err) - require.True(t, updated) - hash, ver, err := tree.SaveVersion() - cID := types.CommitID{Version: ver, Hash: hash} - require.Nil(t, err) - - _, err = store.GetImmutable(cID.Version + 1) - require.Error(t, err) - - newStore, err := store.GetImmutable(cID.Version - 1) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye")) - - newStore, err = store.GetImmutable(cID.Version) - require.NoError(t, err) - require.Equal(t, newStore.Get([]byte("hello")), []byte("adios")) - - res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true}) - require.NoError(t, err) - require.Equal(t, res.Value, []byte("adios")) - require.NotNil(t, res.ProofOps) - - require.Panics(t, func() { newStore.Set(nil, nil) }) - require.Panics(t, func() { newStore.Delete(nil) }) - require.Panics(t, func() { newStore.Commit() }) -} - -func TestTestGetImmutableIterator(t *testing.T) { - db := dbm.NewMemDB() - tree, cID := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - newStore, err := store.GetImmutable(cID.Version) - require.NoError(t, err) - - iter := newStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - 
require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - - require.Equal(t, len(expected), i) -} - -func TestIAVLStoreGetSetHasDelete(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - key := "hello" - - exists := iavlStore.Has([]byte(key)) - require.True(t, exists) - - value := iavlStore.Get([]byte(key)) - require.EqualValues(t, value, treeData[key]) - - value2 := "notgoodbye" - iavlStore.Set([]byte(key), []byte(value2)) - - value = iavlStore.Get([]byte(key)) - require.EqualValues(t, value, value2) - - iavlStore.Delete([]byte(key)) - - exists = iavlStore.Has([]byte(key)) - require.False(t, exists) -} - -func TestIAVLStoreNoNilSet(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - - require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - - require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic") -} - -func TestIAVLIterator(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - iavlStore := UnsafeNewStore(tree) - iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz")) - expected := []string{"aloha", "hello"} - var i int - - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), []byte("rocks")) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("golang")) - expected = []string{"aloha"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, []byte("shalom")) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator(nil, nil) - expected = []string{"aloha", "hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) - - iter = iavlStore.Iterator([]byte("golang"), nil) - expected = []string{"hello"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, treeData[expectedKey]) - i++ - } - require.Equal(t, len(expected), i) -} - -func TestIAVLReverseIterator(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - - tree := iavl.NewMutableTree(db, cacheSize, 
false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte{0x00}, []byte("0")) - iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0")) - iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1")) - iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2")) - iavlStore.Set([]byte{0x01}, []byte("1")) - - testReverseIterator := func(t *testing.T, start, end []byte, expected []string) { - t.Helper() - iter := iavlStore.ReverseIterator(start, end) - var i int - for i = 0; iter.Valid(); iter.Next() { - expectedValue := expected[i] - value := iter.Value() - require.EqualValues(t, string(value), expectedValue) - i++ - } - require.Equal(t, len(expected), i) - } - - testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"}) - testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) - testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}) - testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}) -} - -func TestIAVLPrefixIterator(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStorePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test1", "test2", "test3"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - iter.Close() - require.Equal(t, len(expected), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(0)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected2), i) - - iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(0)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(255)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - iter.Close() - require.Equal(t, len(expected2), i) -} - -func TestIAVLReversePrefixIterator(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - iavlStore.Set([]byte("test1"), []byte("test1")) - iavlStore.Set([]byte("test2"), []byte("test2")) - iavlStore.Set([]byte("test3"), []byte("test3")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4")) - iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4")) - - var i int - - iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test")) - expected := []string{"test3", "test2", "test1"} - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, expectedKey) - i++ - } - require.Equal(t, len(expected), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)}) - expected2 := [][]byte{ - {byte(55), byte(255), byte(255), byte(255)}, - {byte(55), byte(255), byte(255), byte(1)}, - {byte(55), byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected2), i) - - iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)}) - expected2 = [][]byte{ - {byte(255), byte(255), byte(255)}, - {byte(255), byte(255), byte(1)}, - {byte(255), byte(255), byte(0)}, - } - for i = 0; iter.Valid(); iter.Next() { - expectedKey := expected2[i] - key, value := iter.Key(), iter.Value() - require.EqualValues(t, key, expectedKey) - require.EqualValues(t, value, []byte("test4")) - i++ - } - require.Equal(t, len(expected2), i) -} - -func nextVersion(iavl *Store) { - key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version)) - value := []byte(fmt.Sprintf("Value for tree: %d", iavl.LastCommitID().Version)) - iavl.Set(key, value) - iavl.Commit() -} - -func TestIAVLNoPrune(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - nextVersion(iavlStore) - - for i := 1; i < 100; i++ { - for j := 1; j <= i; j++ { - require.True(t, iavlStore.VersionExists(int64(j)), - "Missing version %d with latest version %d.
Should be storing all versions", - j, i) - } - - nextVersion(iavlStore) - } -} - -func TestIAVLStoreQuery(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - iavlStore := UnsafeNewStore(tree) - - k1, v1 := []byte("key1"), []byte("val1") - k2, v2 := []byte("key2"), []byte("val2") - v3 := []byte("val3") - - ksub := []byte("key") - KVs0 := kv.Pairs{} - KVs1 := kv.Pairs{ - Pairs: []kv.Pair{ - {Key: k1, Value: v1}, - {Key: k2, Value: v2}, - }, - } - KVs2 := kv.Pairs{ - Pairs: []kv.Pair{ - {Key: k1, Value: v3}, - {Key: k2, Value: v2}, - }, - } - - valExpSubEmpty, err := KVs0.Marshal() - require.NoError(t, err) - - valExpSub1, err := KVs1.Marshal() - require.NoError(t, err) - - valExpSub2, err := KVs2.Marshal() - require.NoError(t, err) - - cid := iavlStore.Commit() - ver := cid.Version - query := types.RequestQuery{Path: "/key", Data: k1, Height: ver} - querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver} - - // query subspace before anything set - qres, err := iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSubEmpty, qres.Value) - - // set data - iavlStore.Set(k1, v1) - iavlStore.Set(k2, v2) - - // set data without commit, doesn't show up - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // commit it, but still don't see on old version - cid = iavlStore.Commit() - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Nil(t, qres.Value) - - // but yes on the new version - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSub1, qres.Value) - - // modify - iavlStore.Set(k1, v3) - cid = iavlStore.Commit() - - // query will return old values, as height is fixed - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) - - // update to latest in the query and we are happy - query.Height = cid.Version - qres, err = iavlStore.Query(&query) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v3, qres.Value) - query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version} - - qres, err = iavlStore.Query(&query2) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v2, qres.Value) - // and for the subspace - qres, err = iavlStore.Query(&querySub) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, valExpSub2, qres.Value) - - // default (height 0) will show latest -1 - query0 := types.RequestQuery{Path: "/key", Data: k1} - qres, err = iavlStore.Query(&query0) - require.NoError(t, err) - require.Equal(t, uint32(0), qres.Code) - require.Equal(t, v1, qres.Value) -} - -func BenchmarkIAVLIteratorNext(b *testing.B) { - b.ReportAllocs() - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - treeSize := 1000 - tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - - for i := 0; i < treeSize; i++ { - key := randBytes(4) - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(b, err) - } - - iavlStore := 
UnsafeNewStore(tree) - iterators := make([]types.Iterator, b.N/treeSize) - - for i := 0; i < len(iterators); i++ { - iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255}) - } - - b.ResetTimer() - for i := 0; i < len(iterators); i++ { - iter := iterators[i] - for j := 0; j < treeSize; j++ { - iter.Next() - } - } -} - -func TestSetInitialVersion(t *testing.T) { - testCases := []struct { - name string - storeFn func(db *dbm.MemDB) *Store - expPanic bool - }{ - { - "works with a mutable tree", - func(db *dbm.MemDB) *Store { - tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - - return store - }, false, - }, - { - "throws error on immutable tree", - func(db *dbm.MemDB) *Store { - tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger()) - store := UnsafeNewStore(tree) - _, version, err := store.tree.SaveVersion() - require.NoError(t, err) - require.Equal(t, int64(1), version) - store, err = store.GetImmutable(1) - require.NoError(t, err) - - return store - }, true, - }, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - db := dbm.NewMemDB() - store := tc.storeFn(db) - - if tc.expPanic { - require.Panics(t, func() { store.SetInitialVersion(5) }) - } else { - store.SetInitialVersion(5) - cid := store.Commit() - require.Equal(t, int64(5), cid.GetVersion()) - } - }) - } -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - tree, _ := newAlohaTree(t, db) - store := UnsafeNewStore(tree) - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} - -func TestChangeSets(t *testing.T) { - db := dbm.NewMemDB() - treeSize := 1000 - treeVersion := int64(10) - targetVersion := int64(6) - tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt)) - - for j := int64(0); j < treeVersion; j++ { - keys := [][]byte{} - for i := 0; i < treeSize; i++ { - keys = append(keys, randBytes(4)) - } - sort.Slice(keys, func(p, q int) bool { - return bytes.Compare(keys[p], keys[q]) < 0 - }) - for i := 0; i < treeSize; i++ { - key := keys[i] - value := randBytes(50) - _, err := tree.Set(key, value) - require.NoError(t, err) - } - _, _, err := tree.SaveVersion() - require.NoError(t, err) - } - - changeSets := []*iavl.ChangeSet{} - iavlStore := UnsafeNewStore(tree) - commitID := iavlStore.LastCommitID() - - require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error { - changeSets = append(changeSets, cs) - return nil - })) - require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion)) - - for i, cs := range changeSets { - v, err := tree.SaveChangeSet(cs) - require.NoError(t, err) - require.Equal(t, v, targetVersion+int64(i+1)) - } - - restoreCommitID := iavlStore.LastCommitID() - require.Equal(t, commitID, restoreCommitID) -} diff --git a/store/iavl/tree.go b/store/iavl/tree.go deleted file mode 100644 index f6a2db1ffc..0000000000 --- a/store/iavl/tree.go +++ /dev/null @@ -1,108 +0,0 @@ -package iavl - -import ( - "fmt" - - "github.com/cosmos/iavl" - idb "github.com/cosmos/iavl/db" -) - -var ( - _ Tree = (*immutableTree)(nil) - _ Tree = (*iavl.MutableTree)(nil) -) - -type ( - // Tree defines an interface that both mutable and immutable IAVL trees - 
// must implement. For mutable IAVL trees, the interface is directly - // implemented by an iavl.MutableTree. For an immutable IAVL tree, a wrapper - // must be made. - Tree interface { - Has(key []byte) (bool, error) - Get(key []byte) ([]byte, error) - Set(key, value []byte) (bool, error) - Remove(key []byte) ([]byte, bool, error) - SetCommitting() - UnsetCommitting() - SaveVersion() ([]byte, int64, error) - Version() int64 - Hash() []byte - WorkingHash() []byte - VersionExists(version int64) bool - DeleteVersionsTo(version int64) error - GetVersioned(key []byte, version int64) ([]byte, error) - GetImmutable(version int64) (*iavl.ImmutableTree, error) - SetInitialVersion(version uint64) - Iterator(start, end []byte, ascending bool) (idb.Iterator, error) - AvailableVersions() []int - LoadVersionForOverwriting(targetVersion int64) error - TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error - } - - // immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree - // that implements the Tree interface. It should only be used for querying - // and iteration, specifically at previous heights. - immutableTree struct { - *iavl.ImmutableTree - } -) - -func (it *immutableTree) Set(_, _ []byte) (bool, error) { - panic("cannot call 'Set' on an immutable IAVL tree") -} - -func (it *immutableTree) Remove(_ []byte) ([]byte, bool, error) { - panic("cannot call 'Remove' on an immutable IAVL tree") -} - -func (it *immutableTree) SetCommitting() { - panic("cannot call 'SetCommitting' on an immutable IAVL tree") -} - -func (it *immutableTree) UnsetCommitting() { - panic("cannot call 'UnsetCommitting' on an immutable IAVL tree") -} - -func (it *immutableTree) SaveVersion() ([]byte, int64, error) { - panic("cannot call 'SaveVersion' on an immutable IAVL tree") -} - -func (it *immutableTree) DeleteVersionsTo(_ int64) error { - panic("cannot call 'DeleteVersionsTo' on an immutable IAVL tree") -} - -func (it *immutableTree) SetInitialVersion(_ uint64) { - panic("cannot call 'SetInitialVersion' on an immutable IAVL tree") -} - -func (it *immutableTree) VersionExists(version int64) bool { - return it.Version() == version -} - -func (it *immutableTree) GetVersioned(key []byte, version int64) ([]byte, error) { - if it.Version() != version { - return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) - } - - return it.Get(key) -} - -func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error) { - if it.Version() != version { - return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version()) - } - - return it.ImmutableTree, nil -} - -func (it *immutableTree) AvailableVersions() []int { - return []int{} -} - -func (it *immutableTree) LoadVersionForOverwriting(targetVersion int64) error { - panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree") -} - -func (it *immutableTree) WorkingHash() []byte { - panic("cannot call 'WorkingHash' on an immutable IAVL tree") -} diff --git a/store/iavl/tree_test.go b/store/iavl/tree_test.go deleted file mode 100644 index 243355e42e..0000000000 --- a/store/iavl/tree_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package iavl - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/wrapper" -) - -func TestImmutableTreePanics(t *testing.T) { - t.Parallel() - 
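To make the wrapper's role concrete, here is a small sketch of serving a historical read through the shared Tree interface; it assumes it lives in the same iavl package, since immutableTree is unexported:

// queryAtVersion loads a read-only view of the tree at the given version and
// reads a single key through the common Tree interface. Any accidental write
// against the view panics instead of mutating history.
func queryAtVersion(tree Tree, version int64, key []byte) ([]byte, error) {
	imm, err := tree.GetImmutable(version)
	if err != nil {
		return nil, err
	}

	var view Tree = &immutableTree{imm} // the read-only wrapper shown above
	return view.Get(key)
}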
immTree := iavl.NewImmutableTree(wrapper.NewDBWrapper(dbm.NewMemDB()), 100, false, log.NewNopLogger()) - it := &immutableTree{immTree} - require.Panics(t, func() { - _, err := it.Set([]byte{}, []byte{}) - require.NoError(t, err) - }) - require.Panics(t, func() { - _, _, err := it.Remove([]byte{}) - require.NoError(t, err) - }) - require.Panics(t, func() { _, _, _ = it.SaveVersion() }) - require.Panics(t, func() { _ = it.DeleteVersionsTo(int64(1)) }) - - val, err := it.GetVersioned(nil, 1) - require.Error(t, err) - require.Nil(t, val) - - imm, err := it.GetImmutable(1) - require.Error(t, err) - require.Nil(t, imm) - - imm, err = it.GetImmutable(0) - require.NoError(t, err) - require.NotNil(t, imm) - require.Equal(t, immTree, imm) -} diff --git a/store/internal/conv/doc.go b/store/internal/conv/doc.go deleted file mode 100644 index 4b45d1ab53..0000000000 --- a/store/internal/conv/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package conv provides internal functions for conversions and data manipulation -package conv diff --git a/store/internal/conv/string.go b/store/internal/conv/string.go deleted file mode 100644 index 96d89c3a5f..0000000000 --- a/store/internal/conv/string.go +++ /dev/null @@ -1,19 +0,0 @@ -package conv - -import ( - "unsafe" -) - -// UnsafeStrToBytes uses unsafe to convert a string into a byte slice. The returned bytes -// must not be altered after this function is called, as doing so can cause a segmentation fault. -func UnsafeStrToBytes(s string) []byte { - return unsafe.Slice(unsafe.StringData(s), len(s)) // ref https://github.com/golang/go/issues/53003#issuecomment-1140276077 -} - -// UnsafeBytesToStr is meant to make a zero allocation conversion -// from []byte -> string to speed up operations; it is not meant -// to be used generally, but for a specific pattern to delete keys -// from a map. -func UnsafeBytesToStr(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git a/store/internal/conv/string_test.go b/store/internal/conv/string_test.go deleted file mode 100644 index 3a14517531..0000000000 --- a/store/internal/conv/string_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package conv - -import ( - "runtime" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/suite" -) - -func TestStringSuite(t *testing.T) { - suite.Run(t, new(StringSuite)) -} - -type StringSuite struct{ suite.Suite } - -func unsafeConvertStr() []byte { - return UnsafeStrToBytes("abc") -} - -func (s *StringSuite) TestUnsafeStrToBytes() { - // we convert in another function to trigger GC. We want to check that - // the underlying array in []byte is still accessible after the GC finishes. - for i := 0; i < 5; i++ { - b := unsafeConvertStr() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - b2 := append(b, 'd') - s.Equal("abc", string(b)) - s.Equal("abcd", string(b2)) - } -} - -func unsafeConvertBytes() string { - return UnsafeBytesToStr([]byte("abc")) -} - -func (s *StringSuite) TestUnsafeBytesToStr() { - // we convert in another function to trigger GC. We want to check that - // the underlying array in []byte is still accessible after the GC finishes.
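The flip side of these zero-copy conversions is aliasing: the returned string shares the slice's backing array. A standalone sketch of the hazard, mirroring the deleted helper:

package main

import (
	"fmt"
	"unsafe"
)

// unsafeBytesToStr mirrors the deleted conv.UnsafeBytesToStr.
func unsafeBytesToStr(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func main() {
	b := []byte("abc")
	s := unsafeBytesToStr(b)
	b[0] = 'x'     // mutating the slice...
	fmt.Println(s) // ...prints "xbc": the "string" aliases the slice's memory
}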
- for i := 0; i < 5; i++ { - str := unsafeConvertBytes() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - s.Equal("abc", str) - } -} - -func BenchmarkUnsafeStrToBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - UnsafeStrToBytes(strconv.Itoa(i)) - } -} diff --git a/store/internal/kv/helpers.go b/store/internal/kv/helpers.go deleted file mode 100644 index 5bccea122e..0000000000 --- a/store/internal/kv/helpers.go +++ /dev/null @@ -1,17 +0,0 @@ -package kv - -import "fmt" - -// AssertKeyAtLeastLength panics when store key length is less than the given length. -func AssertKeyAtLeastLength(bz []byte, length int) { - if len(bz) < length { - panic(fmt.Sprintf("expected key of length at least %d, got %d", length, len(bz))) - } -} - -// AssertKeyLength panics when store key length is not equal to the given length. -func AssertKeyLength(bz []byte, length int) { - if len(bz) != length { - panic(fmt.Sprintf("unexpected key length; got: %d, expected: %d", len(bz), length)) - } -} diff --git a/store/internal/kv/kv.go b/store/internal/kv/kv.go deleted file mode 100644 index 1f3da91cc2..0000000000 --- a/store/internal/kv/kv.go +++ /dev/null @@ -1,28 +0,0 @@ -package kv - -import ( - "bytes" - "sort" -) - -func (kvs Pairs) Len() int { return len(kvs.Pairs) } -func (kvs Pairs) Less(i, j int) bool { - switch bytes.Compare(kvs.Pairs[i].Key, kvs.Pairs[j].Key) { - case -1: - return true - - case 0: - return bytes.Compare(kvs.Pairs[i].Value, kvs.Pairs[j].Value) < 0 - - case 1: - return false - - default: - panic("invalid comparison result") - } -} - -func (kvs Pairs) Swap(i, j int) { kvs.Pairs[i], kvs.Pairs[j] = kvs.Pairs[j], kvs.Pairs[i] } - -// Sort invokes sort.Sort on kvs. -func (kvs Pairs) Sort() { sort.Sort(kvs) } diff --git a/store/internal/kv/kv.pb.go b/store/internal/kv/kv.pb.go deleted file mode 100644 index 847bd11d44..0000000000 --- a/store/internal/kv/kv.pb.go +++ /dev/null @@ -1,559 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/internal/kv/v1beta1/kv.proto - -package kv - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Pairs defines a repeated slice of Pair objects. 
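The assertion helpers above were typically used when decoding fixed-layout store keys. A hedged sketch, with a hypothetical layout of a one-byte prefix followed by a 20-byte address:

// parseAddrFromKey extracts a 20-byte address from a store key laid out as
// prefix (1 byte) || address (20 bytes). The layout is hypothetical; the
// assertions are the deleted kv helpers, which panic on malformed keys.
func parseAddrFromKey(bz []byte) []byte {
	kv.AssertKeyAtLeastLength(bz, 1+20) // panics on truncated keys
	addr := bz[1 : 1+20]
	kv.AssertKeyLength(addr, 20)
	return addr
}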
-type Pairs struct { - Pairs []Pair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs"` -} - -func (m *Pairs) Reset() { *m = Pairs{} } -func (m *Pairs) String() string { return proto.CompactTextString(m) } -func (*Pairs) ProtoMessage() {} -func (*Pairs) Descriptor() ([]byte, []int) { - return fileDescriptor_534782c4083e056d, []int{0} -} -func (m *Pairs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Pairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Pairs.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Pairs) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pairs.Merge(m, src) -} -func (m *Pairs) XXX_Size() int { - return m.Size() -} -func (m *Pairs) XXX_DiscardUnknown() { - xxx_messageInfo_Pairs.DiscardUnknown(m) -} - -var xxx_messageInfo_Pairs proto.InternalMessageInfo - -func (m *Pairs) GetPairs() []Pair { - if m != nil { - return m.Pairs - } - return nil -} - -// Pair defines a key/value bytes tuple. -type Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *Pair) Reset() { *m = Pair{} } -func (m *Pair) String() string { return proto.CompactTextString(m) } -func (*Pair) ProtoMessage() {} -func (*Pair) Descriptor() ([]byte, []int) { - return fileDescriptor_534782c4083e056d, []int{1} -} -func (m *Pair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Pair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Pair) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pair.Merge(m, src) -} -func (m *Pair) XXX_Size() int { - return m.Size() -} -func (m *Pair) XXX_DiscardUnknown() { - xxx_messageInfo_Pair.DiscardUnknown(m) -} - -var xxx_messageInfo_Pair proto.InternalMessageInfo - -func (m *Pair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *Pair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Pairs)(nil), "cosmos.store.internal.kv.v1beta1.Pairs") - proto.RegisterType((*Pair)(nil), "cosmos.store.internal.kv.v1beta1.Pair") -} - -func init() { - proto.RegisterFile("cosmos/store/internal/kv/v1beta1/kv.proto", fileDescriptor_534782c4083e056d) -} - -var fileDescriptor_534782c4083e056d = []byte{ - // 217 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce, - 0xcd, 0x2f, 0xd6, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, - 0xcc, 0xd1, 0xcf, 0x2e, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0x2e, 0xd3, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x80, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0x83, 0x29, 0xd5, - 0xcb, 0x2e, 0xd3, 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd6, 0x07, 0xb1, - 0x20, 0xfa, 0x94, 0xbc, 0xb9, 0x58, 0x03, 0x12, 0x33, 0x8b, 0x8a, 0x85, 0x9c, 0xb8, 0x58, 0x0b, - 0x40, 0x0c, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x35, 0x3d, 0x42, 0x06, 0xea, 0x81, 0xf4, - 0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd1, 0xaa, 0xa4, 0xc7, 0xc5, 0x02, 0x12, 0x14, - 0x12, 
0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, - 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xc5, - 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, - 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x41, 0x6c, 0x2f, 0x4e, 0xc9, - 0xd6, 0xcb, 0xcc, 0xc7, 0xf4, 0x7f, 0x12, 0x1b, 0xd8, 0xf5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x5d, 0xad, 0x97, 0xdd, 0x22, 0x01, 0x00, 0x00, -} - -func (m *Pairs) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pairs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pairs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pairs) > 0 { - for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Pair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - offset -= sovKv(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Pairs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Pairs) > 0 { - for _, e := range m.Pairs { - l = e.Size() - n += 1 + l + sovKv(uint64(l)) - } - } - return n -} - -func (m *Pair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovKv(uint64(l)) - } - return n -} - -func sovKv(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozKv(x uint64) (n int) { - return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Pairs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pairs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pairs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pairs = append(m.Pairs, Pair{}) - if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Pair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowKv - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthKv - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
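The hand-rolled varint reads and writes above follow the standard protobuf wire format. The same uvarint length-prefixing, written with the standard library for clarity (illustrative, not part of the generated file):

import "encoding/binary"

// lengthPrefix prepends a uvarint length to a payload, the same shape that
// encodeVarintKv writes and Unmarshal reads back.
func lengthPrefix(payload []byte) []byte {
	buf := make([]byte, binary.MaxVarintLen64+len(payload))
	n := binary.PutUvarint(buf, uint64(len(payload)))
	copy(buf[n:], payload)
	return buf[:n+len(payload)]
}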
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipKv(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthKv - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipKv(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowKv - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthKv - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupKv - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthKv - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/internal/maps/bench_test.go b/store/internal/maps/bench_test.go deleted file mode 100644 index 4d7f680c70..0000000000 --- a/store/internal/maps/bench_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package maps - -import "testing" - -func BenchmarkKVPairBytes(b *testing.B) { - kvp := NewKVPair(make([]byte, 128), make([]byte, 1e6)) - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - b.SetBytes(int64(len(kvp.Bytes()))) - } -} diff --git a/store/internal/maps/maps.go b/store/internal/maps/maps.go deleted file mode 100644 index 6db2be666c..0000000000 --- a/store/internal/maps/maps.go +++ /dev/null @@ -1,216 +0,0 @@ -package maps - -import ( - "crypto/sha256" - "encoding/binary" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - "github.com/cometbft/cometbft/crypto/merkle" - - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/internal/tree" -) - -// merkleMap defines a merkle-ized tree from a map. Leaf values are treated as -// hash(key) | hash(value). Leaves are sorted before Merkle hashing. -type merkleMap struct { - kvs kv.Pairs - sorted bool -} - -func newMerkleMap() *merkleMap { - return &merkleMap{ - kvs: kv.Pairs{}, - sorted: false, - } -} - -// Set creates a kv.Pair from the provided key and value. The value is hashed prior -// to creating a kv.Pair. The created kv.Pair is appended to the merkleMap's slice -// of kv.Pairs. Each call marks the merkleMap as unsorted; it is re-sorted before hashing.
-func (sm *merkleMap) set(key string, value []byte) { - byteKey := []byte(key) - assertValidKey(byteKey) - - sm.sorted = false - - // The value is hashed, so you can check for equality with a cached value (say) - // and make a determination to fetch or not. - vhash := sha256.Sum256(value) - - sm.kvs.Pairs = append(sm.kvs.Pairs, kv.Pair{ - Key: byteKey, - Value: vhash[:], - }) -} - -// Hash returns the merkle root of items sorted by key. Note, it is unstable. -func (sm *merkleMap) hash() []byte { - sm.sort() - return hashKVPairs(sm.kvs) -} - -func (sm *merkleMap) sort() { - if sm.sorted { - return - } - - sm.kvs.Sort() - sm.sorted = true -} - -// hashKVPairs hashes a kvPair and creates a merkle tree where the leaves are -// byte slices. -func hashKVPairs(kvs kv.Pairs) []byte { - kvsH := make([][]byte, len(kvs.Pairs)) - for i, kvp := range kvs.Pairs { - kvsH[i] = KVPair(kvp).Bytes() - } - - return tree.HashFromByteSlices(kvsH) -} - -// --------------------------------------------- - -// Merkle tree from a map. -// Leaves are `hash(key) | hash(value)`. -// Leaves are sorted before Merkle hashing. -type simpleMap struct { - Kvs kv.Pairs - sorted bool -} - -func newSimpleMap() *simpleMap { - return &simpleMap{ - Kvs: kv.Pairs{}, - sorted: false, - } -} - -// Set creates a kv pair of the key and the hash of the value, -// and then appends it to SimpleMap's kv pairs. -func (sm *simpleMap) Set(key string, value []byte) { - byteKey := []byte(key) - assertValidKey(byteKey) - sm.sorted = false - - // The value is hashed, so you can - // check for equality with a cached value (say) - // and make a determination to fetch or not. - vhash := sha256.Sum256(value) - - sm.Kvs.Pairs = append(sm.Kvs.Pairs, kv.Pair{ - Key: byteKey, - Value: vhash[:], - }) -} - -// Hash returns the Merkle root hash of items sorted by key -// (UNSTABLE: and by value too if duplicate key). -func (sm *simpleMap) Hash() []byte { - sm.Sort() - return hashKVPairs(sm.Kvs) -} - -func (sm *simpleMap) Sort() { - if sm.sorted { - return - } - sm.Kvs.Sort() - sm.sorted = true -} - -// KVPairs returns a copy of sorted KVPairs. -// NOTE these contain the key and the hashed value. -func (sm *simpleMap) KVPairs() kv.Pairs { - sm.Sort() - kvs := kv.Pairs{ - Pairs: make([]kv.Pair, len(sm.Kvs.Pairs)), - } - - copy(kvs.Pairs, sm.Kvs.Pairs) - return kvs -} - -//---------------------------------------- - -// KVPair is a local extension to kv.Pair that can be hashed. -// Key and value are length prefixed and concatenated, -// then hashed. -type KVPair kv.Pair - -// NewKVPair takes in a key and value and creates a kv.Pair -// wrapped in the local extension KVPair -func NewKVPair(key, value []byte) KVPair { - return KVPair(kv.Pair{ - Key: key, - Value: value, - }) -} - -// Bytes returns key || value, with both the -// key and value length prefixed. -func (kv KVPair) Bytes() []byte { - // In the worst case: - // * 8 bytes to Uvarint encode the length of the key - // * 8 bytes to Uvarint encode the length of the value - // So preallocate for the worst case, which will in total - // be a maximum of 14 bytes wasted, if len(key)=1, len(value)=1, - // but that's going to be rare. - buf := make([]byte, 8+len(kv.Key)+8+len(kv.Value)) - - // Encode the key, prefixed with its length. - nlk := binary.PutUvarint(buf, uint64(len(kv.Key))) - nk := copy(buf[nlk:], kv.Key) - - // Encode the value, prefixing with its length.
- nlv := binary.PutUvarint(buf[nlk+nk:], uint64(len(kv.Value))) - nv := copy(buf[nlk+nk+nlv:], kv.Value) - - return buf[:nlk+nk+nlv+nv] -} - -// HashFromMap computes a merkle tree from sorted map and returns the merkle -// root. -func HashFromMap(m map[string][]byte) []byte { - mm := newMerkleMap() - for k, v := range m { - mm.set(k, v) - } - - return mm.hash() -} - -// ProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values -// in the underlying key-value pairs. -// The keys are sorted before the proofs are computed. -func ProofsFromMap(m map[string][]byte) ([]byte, map[string]*cmtprotocrypto.Proof, []string) { - sm := newSimpleMap() - for k, v := range m { - sm.Set(k, v) - } - - sm.Sort() - kvs := sm.Kvs - kvsBytes := make([][]byte, len(kvs.Pairs)) - for i, kvp := range kvs.Pairs { - kvsBytes[i] = KVPair(kvp).Bytes() - } - - rootHash, proofList := merkle.ProofsFromByteSlices(kvsBytes) - proofs := make(map[string]*cmtprotocrypto.Proof) - keys := make([]string, len(proofList)) - - for i, kvp := range kvs.Pairs { - proofs[string(kvp.Key)] = proofList[i].ToProto() - keys[i] = string(kvp.Key) - } - - return rootHash, proofs, keys -} - -func assertValidKey(key []byte) { - if len(key) == 0 { - panic("key is nil") - } -} diff --git a/store/internal/maps/maps_test.go b/store/internal/maps/maps_test.go deleted file mode 100644 index ce7ad72e64..0000000000 --- a/store/internal/maps/maps_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package maps - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEmptyKeyMerkleMap(t *testing.T) { - db := newMerkleMap() - require.Panics(t, func() { db.set("", []byte("value")) }, "setting an empty key should panic") -} - -func TestMerkleMap(t *testing.T) { - tests := []struct { - keys []string - values []string // each string gets converted to []byte in test - want string - }{ - {[]string{}, []string{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, - {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, - // swap order with 2 keys - { - []string{"key1", "key2"}, - []string{"value1", "value2"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - { - []string{"key2", "key1"}, - []string{"value2", "value1"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - // swap order with 3 keys - { - []string{"key1", "key2", "key3"}, - []string{"value1", "value2", "value3"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - { - []string{"key1", "key3", "key2"}, - []string{"value1", "value3", "value2"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - } - for i, tc := range tests { - db := newMerkleMap() - for i := 0; i < len(tc.keys); i++ { - db.set(tc.keys[i], []byte(tc.values[i])) - } - - got := db.hash() - assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) - } -} - -func TestEmptyKeySimpleMap(t *testing.T) { - db := newSimpleMap() - require.Panics(t, func() { db.Set("", []byte("value")) }, "setting an empty key should panic") -} - -func TestSimpleMap(t *testing.T) { - tests := []struct { - keys []string - values []string // each string gets converted to []byte in test - want string - }{ - {[]string{}, []string{}, 
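Decoding reverses the layout KVPair.Bytes produces above: read a uvarint key length, the key, a uvarint value length, then the value. A sketch assuming well-formed input; a production decoder would check both Uvarint results:

import "encoding/binary"

// splitKVPairBytes reads back the uvarint-length-prefixed key and value
// written by KVPair.Bytes. Hypothetical helper; bounds checks omitted.
func splitKVPairBytes(buf []byte) (key, value []byte) {
	kl, n := binary.Uvarint(buf)
	key = buf[n : n+int(kl)]

	rest := buf[n+int(kl):]
	vl, m := binary.Uvarint(rest)
	value = rest[m : m+int(vl)]

	return key, value
}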
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, - {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, - // swap order with 2 keys - { - []string{"key1", "key2"}, - []string{"value1", "value2"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - { - []string{"key2", "key1"}, - []string{"value2", "value1"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - // swap order with 3 keys - { - []string{"key1", "key2", "key3"}, - []string{"value1", "value2", "value3"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - { - []string{"key1", "key3", "key2"}, - []string{"value1", "value3", "value2"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - } - for i, tc := range tests { - db := newSimpleMap() - for i := 0; i < len(tc.keys); i++ { - db.Set(tc.keys[i], []byte(tc.values[i])) - } - got := db.Hash() - assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) - } -} diff --git a/store/internal/proofs/convert.go b/store/internal/proofs/convert.go deleted file mode 100644 index 05ae4b1d13..0000000000 --- a/store/internal/proofs/convert.go +++ /dev/null @@ -1,98 +0,0 @@ -package proofs - -import ( - "fmt" - "math/bits" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - ics23 "github.com/cosmos/ics23/go" -) - -// ConvertExistenceProof will convert the given proof into a valid -// existence proof, if that's what it is. -// -// This is the simplest case of the range proof and we will focus on -// demoing compatibility here -func ConvertExistenceProof(p *cmtprotocrypto.Proof, key, value []byte) (*ics23.ExistenceProof, error) { - path, err := convertInnerOps(p) - if err != nil { - return nil, err - } - - proof := &ics23.ExistenceProof{ - Key: key, - Value: value, - Leaf: convertLeafOp(), - Path: path, - } - return proof, nil -} - -// this is adapted from merkle/hash.go:leafHash() -// and merkle/simple_map.go:KVPair.Bytes() -func convertLeafOp() *ics23.LeafOp { - prefix := []byte{0} - - return &ics23.LeafOp{ - Hash: ics23.HashOp_SHA256, - PrehashKey: ics23.HashOp_NO_HASH, - PrehashValue: ics23.HashOp_SHA256, - Length: ics23.LengthOp_VAR_PROTO, - Prefix: prefix, - } -} - -func convertInnerOps(p *cmtprotocrypto.Proof) ([]*ics23.InnerOp, error) { - inners := make([]*ics23.InnerOp, 0, len(p.Aunts)) - path := buildPath(p.Index, p.Total) - - if len(p.Aunts) != len(path) { - return nil, fmt.Errorf("calculated a path different length (%d) than provided by SimpleProof (%d)", len(path), len(p.Aunts)) - } - - for i, aunt := range p.Aunts { - auntRight := path[i] - - // combine with: 0x01 || lefthash || righthash - inner := &ics23.InnerOp{Hash: ics23.HashOp_SHA256} - if auntRight { - inner.Prefix = []byte{1} - inner.Suffix = aunt - } else { - inner.Prefix = append([]byte{1}, aunt...) 
- } - inners = append(inners, inner) - } - return inners, nil -} - -// buildPath returns a list of steps from leaf to root -// in each step, true means index is left side, false index is right side -// code adapted from merkle/simple_proof.go:computeHashFromAunts -func buildPath(idx, total int64) []bool { - if total < 2 { - return nil - } - numLeft := getSplitPoint(total) - goLeft := idx < numLeft - - // we put goLeft at the end of the array, as we recurse from top to bottom, - // and want the leaf to be first in array, root last - if goLeft { - return append(buildPath(idx, numLeft), goLeft) - } - return append(buildPath(idx-numLeft, total-numLeft), goLeft) -} - -func getSplitPoint(length int64) int64 { - if length < 1 { - panic("Trying to split a tree with size < 1") - } - uLength := uint(length) - bitlen := bits.Len(uLength) - k := int64(1 << uint(bitlen-1)) - if k == length { - k >>= 1 - } - return k -} diff --git a/store/internal/proofs/convert_test.go b/store/internal/proofs/convert_test.go deleted file mode 100644 index 19c5a67615..0000000000 --- a/store/internal/proofs/convert_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package proofs - -import ( - "bytes" - "fmt" - "testing" -) - -func TestLeafOp(t *testing.T) { - proof := GenerateRangeProof(20, Middle) - - converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value) - if err != nil { - t.Fatal(err) - } - - leaf := converted.GetLeaf() - if leaf == nil { - t.Fatalf("Missing leaf node") - } - - hash, err := leaf.Apply(converted.Key, converted.Value) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(hash, proof.Proof.LeafHash) { - t.Errorf("Calculated: %X\nExpected: %X", hash, proof.Proof.LeafHash) - } -} - -func TestBuildPath(t *testing.T) { - cases := map[string]struct { - idx int64 - total int64 - expected []bool - }{ - "pair left": { - idx: 0, - total: 2, - expected: []bool{true}, - }, - "pair right": { - idx: 1, - total: 2, - expected: []bool{false}, - }, - "power of 2": { - idx: 3, - total: 8, - expected: []bool{false, false, true}, - }, - "size of 7 right most": { - idx: 6, - total: 7, - expected: []bool{false, false}, - }, - "size of 6 right-left (from top)": { - idx: 4, - total: 6, - expected: []bool{true, false}, - }, - "size of 6 left-right-left (from top)": { - idx: 2, - total: 7, - expected: []bool{true, false, true}, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - path := buildPath(tc.idx, tc.total) - if len(path) != len(tc.expected) { - t.Fatalf("Got %v\nExpected %v", path, tc.expected) - } - for i := range path { - if path[i] != tc.expected[i] { - t.Fatalf("Differ at %d\nGot %v\nExpected %v", i, path, tc.expected) - } - } - }) - } -} - -func TestConvertProof(t *testing.T) { - for i := 0; i < 100; i++ { - t.Run(fmt.Sprintf("Run %d", i), func(t *testing.T) { - proof := GenerateRangeProof(57, Left) - - converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value) - if err != nil { - t.Fatal(err) - } - - calc, err := converted.Calculate() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(calc, proof.RootHash) { - t.Errorf("Calculated: %X\nExpected: %X", calc, proof.RootHash) - } - }) - } -} diff --git a/store/internal/proofs/create.go b/store/internal/proofs/create.go deleted file mode 100644 index 55874d99cd..0000000000 --- a/store/internal/proofs/create.go +++ /dev/null @@ -1,103 +0,0 @@ -package proofs - -import ( - "errors" - "sort" - - ics23 "github.com/cosmos/ics23/go" - - sdkmaps "cosmossdk.io/store/internal/maps" -) - -var ( - ErrEmptyKey = 
errors.New("key is empty") - ErrEmptyKeyInData = errors.New("data contains empty key") -) - -/* -CreateMembershipProof will produce a CommitmentProof that the given key (and queries value) exists in the map. -If the key doesn't exist in the tree, this will return an error. -*/ -func CreateMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) { - if len(key) == 0 { - return nil, ErrEmptyKey - } - exist, err := createExistenceProof(data, key) - if err != nil { - return nil, err - } - proof := &ics23.CommitmentProof{ - Proof: &ics23.CommitmentProof_Exist{ - Exist: exist, - }, - } - return proof, nil -} - -/* -CreateNonMembershipProof will produce a CommitmentProof that the given key doesn't exist in the map. -If the key exists in the tree, this will return an error. -*/ -func CreateNonMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) { - if len(key) == 0 { - return nil, ErrEmptyKey - } - // ensure this key is not in the store - if _, ok := data[string(key)]; ok { - return nil, errors.New("cannot create non-membership proof if key is in map") - } - - keys := SortedKeys(data) - rightidx := sort.SearchStrings(keys, string(key)) - - var err error - nonexist := &ics23.NonExistenceProof{ - Key: key, - } - - // include left proof unless key is left of entire map - if rightidx >= 1 { - leftkey := keys[rightidx-1] - nonexist.Left, err = createExistenceProof(data, []byte(leftkey)) - if err != nil { - return nil, err - } - } - - // include right proof unless key is right of entire map - if rightidx < len(keys) { - rightkey := keys[rightidx] - nonexist.Right, err = createExistenceProof(data, []byte(rightkey)) - if err != nil { - return nil, err - } - - } - - proof := &ics23.CommitmentProof{ - Proof: &ics23.CommitmentProof_Nonexist{ - Nonexist: nonexist, - }, - } - return proof, nil -} - -func createExistenceProof(data map[string][]byte, key []byte) (*ics23.ExistenceProof, error) { - for k := range data { - if k == "" { - return nil, ErrEmptyKeyInData - } - } - value, ok := data[string(key)] - if !ok { - return nil, errors.New("cannot make existence proof if key is not in map") - } - - _, proofs, _ := sdkmaps.ProofsFromMap(data) - proof := proofs[string(key)] - if proof == nil { - return nil, errors.New("returned no proof for key") - } - - return ConvertExistenceProof(proof, key, value) -} diff --git a/store/internal/proofs/create_test.go b/store/internal/proofs/create_test.go deleted file mode 100644 index 16818e657a..0000000000 --- a/store/internal/proofs/create_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package proofs - -import ( - "errors" - "testing" - - ics23 "github.com/cosmos/ics23/go" - "github.com/stretchr/testify/assert" -) - -func TestCreateMembership(t *testing.T) { - cases := map[string]struct { - size int - loc Where - }{ - "small left": {size: 100, loc: Left}, - "small middle": {size: 100, loc: Middle}, - "small right": {size: 100, loc: Right}, - "big left": {size: 5431, loc: Left}, - "big middle": {size: 5431, loc: Middle}, - "big right": {size: 5431, loc: Right}, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - data := BuildMap(tc.size) - allkeys := SortedKeys(data) - key := GetKey(allkeys, tc.loc) - nonKey := GetNonKey(allkeys, tc.loc) - - // error if the key does not exist - proof, err := CreateMembershipProof(data, []byte(nonKey)) - assert.EqualError(t, err, "cannot make existence proof if key is not in map") - assert.Nil(t, proof) - - val := data[key] - proof, err = CreateMembershipProof(data, 
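CreateNonMembershipProof above hinges on sort.SearchStrings returning the would-be insertion index, which pins down the existing neighbors on either side of the absent key. A small sketch of that lookup:

import "sort"

// neighbors returns the nearest existing keys on each side of a missing key;
// an empty string means the missing key falls off that end of the sorted set.
func neighbors(sortedKeys []string, missing string) (left, right string) {
	idx := sort.SearchStrings(sortedKeys, missing) // insertion index
	if idx > 0 {
		left = sortedKeys[idx-1]
	}
	if idx < len(sortedKeys) {
		right = sortedKeys[idx]
	}
	return left, right
}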
[]byte(key)) - if err != nil { - t.Fatalf("Creating Proof: %+v", err) - } - if proof.GetExist() == nil { - t.Fatal("Unexpected proof format") - } - - root := CalcRoot(data) - err = proof.GetExist().Verify(ics23.TendermintSpec, root, []byte(key), val) - if err != nil { - t.Fatalf("Verifying Proof: %+v", err) - } - - valid := ics23.VerifyMembership(ics23.TendermintSpec, root, proof, []byte(key), val) - if !valid { - t.Fatalf("Membership Proof Invalid") - } - }) - } -} - -func TestCreateNonMembership(t *testing.T) { - cases := map[string]struct { - size int - loc Where - }{ - "small left": {size: 100, loc: Left}, - "small middle": {size: 100, loc: Middle}, - "small right": {size: 100, loc: Right}, - "big left": {size: 5431, loc: Left}, - "big middle": {size: 5431, loc: Middle}, - "big right": {size: 5431, loc: Right}, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - data := BuildMap(tc.size) - allkeys := SortedKeys(data) - nonKey := GetNonKey(allkeys, tc.loc) - key := GetKey(allkeys, tc.loc) - - // error if the key exists - proof, err := CreateNonMembershipProof(data, []byte(key)) - assert.EqualError(t, err, "cannot create non-membership proof if key is in map") - assert.Nil(t, proof) - - proof, err = CreateNonMembershipProof(data, []byte(nonKey)) - if err != nil { - t.Fatalf("Creating Proof: %+v", err) - } - if proof.GetNonexist() == nil { - t.Fatal("Unexpected proof format") - } - - root := CalcRoot(data) - err = proof.GetNonexist().Verify(ics23.TendermintSpec, root, []byte(nonKey)) - if err != nil { - t.Fatalf("Verifying Proof: %+v", err) - } - - valid := ics23.VerifyNonMembership(ics23.TendermintSpec, root, proof, []byte(nonKey)) - if !valid { - t.Fatalf("Non Membership Proof Invalid") - } - }) - } -} - -func TestInvalidKey(t *testing.T) { - tests := []struct { - name string - f func(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) - data map[string][]byte - key []byte - err error - }{ - {"CreateMembershipProof empty key", CreateMembershipProof, map[string][]byte{"": nil}, []byte(""), ErrEmptyKey}, - {"CreateMembershipProof empty key in data", CreateMembershipProof, map[string][]byte{"": nil, " ": nil}, []byte(" "), ErrEmptyKeyInData}, - {"CreateNonMembershipProof empty key", CreateNonMembershipProof, map[string][]byte{" ": nil}, []byte(""), ErrEmptyKey}, - {"CreateNonMembershipProof empty key in data", CreateNonMembershipProof, map[string][]byte{"": nil}, []byte(" "), ErrEmptyKeyInData}, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - _, err := tc.f(tc.data, tc.key) - assert.True(t, errors.Is(err, tc.err)) - }) - } -} diff --git a/store/internal/proofs/helpers.go b/store/internal/proofs/helpers.go deleted file mode 100644 index a87a692212..0000000000 --- a/store/internal/proofs/helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -package proofs - -import ( - "sort" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - "golang.org/x/exp/maps" - - "cosmossdk.io/math/unsafe" - sdkmaps "cosmossdk.io/store/internal/maps" -) - -// SimpleResult contains a merkle.SimpleProof along with all data needed to build the confio/proof -type SimpleResult struct { - Key []byte - Value []byte - Proof *cmtprotocrypto.Proof - RootHash []byte -} - -// GenerateRangeProof makes a tree of size and returns a range proof for one random element -// -// returns a range proof and the root hash of the tree -func GenerateRangeProof(size int, loc Where) *SimpleResult { - data := BuildMap(size) - root, proofs, allkeys := 
sdkmaps.ProofsFromMap(data) - - key := GetKey(allkeys, loc) - proof := proofs[key] - - res := &SimpleResult{ - Key: []byte(key), - Value: toValue(key), - Proof: proof, - RootHash: root, - } - return res -} - -// Where selects a location for a key - Left, Right, or Middle -type Where int - -const ( - Left Where = iota - Right - Middle -) - -func SortedKeys(data map[string][]byte) []string { - keys := maps.Keys(data) - sort.Strings(keys) - return keys } - -func CalcRoot(data map[string][]byte) []byte { - root, _, _ := sdkmaps.ProofsFromMap(data) - return root -} - -// GetKey returns a key at the requested location: Left, Right, or Middle -func GetKey(allkeys []string, loc Where) string { - if loc == Left { - return allkeys[0] - } - if loc == Right { - return allkeys[len(allkeys)-1] - } - // select a random index between 1 and len(allkeys)-2 - idx := unsafe.NewRand().Int()%(len(allkeys)-2) + 1 - return allkeys[idx] -} - -// GetNonKey returns a missing key - Left of all, Right of all, or in the Middle -func GetNonKey(allkeys []string, loc Where) string { - if loc == Left { - return string([]byte{1, 1, 1, 1}) - } - if loc == Right { - return string([]byte{0xff, 0xff, 0xff, 0xff}) - } - // otherwise, next to an existing key (copy before mod) - key := GetKey(allkeys, loc) - key = key[:len(key)-2] + string([]byte{255, 255}) - return key -} - -func toValue(key string) []byte { - return []byte("value_for_" + key) -} - -// BuildMap creates random key/values and stores them in a map -func BuildMap(size int) map[string][]byte { - data := make(map[string][]byte) - // insert lots of info and store the bytes - for i := 0; i < size; i++ { - key := unsafe.Str(20) - data[key] = toValue(key) - } - return data -} diff --git a/store/internal/tree/hash.go b/store/internal/tree/hash.go deleted file mode 100644 index a4facd93e9..0000000000 --- a/store/internal/tree/hash.go +++ /dev/null @@ -1,68 +0,0 @@ -package tree - -import ( - "crypto/sha256" - "hash" - "math/bits" -) - -var ( - leafPrefix = []byte{0} - innerPrefix = []byte{1} -) - -// HashFromByteSlices computes a Merkle tree where the leaves are the byte slices, -// in the provided order. It follows RFC-6962.
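For reference, the RFC-6962 domain separation implemented below can be written out flat: leaves hash as SHA-256(0x00 || data) and inner nodes as SHA-256(0x01 || left || right), which prevents a leaf from being reinterpreted as an inner node. A minimal sketch:

import "crypto/sha256"

// rfc6962Leaf and rfc6962Inner spell out the two hash domains.
func rfc6962Leaf(data []byte) []byte {
	h := sha256.Sum256(append([]byte{0x00}, data...))
	return h[:]
}

func rfc6962Inner(left, right []byte) []byte {
	buf := make([]byte, 0, 1+len(left)+len(right))
	buf = append(buf, 0x01)
	buf = append(buf, left...)
	buf = append(buf, right...)
	h := sha256.Sum256(buf)
	return h[:]
}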
-func HashFromByteSlices(items [][]byte) []byte { - return hashFromByteSlices(sha256.New(), items) -} - -func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte { - switch len(items) { - case 0: - return emptyHash() - case 1: - return leafHashOpt(sha, items[0]) - default: - k := getSplitPoint(int64(len(items))) - left := hashFromByteSlices(sha, items[:k]) - right := hashFromByteSlices(sha, items[k:]) - return innerHashOpt(sha, left, right) - } -} - -// returns tmhash(0x00 || leaf) -func leafHashOpt(s hash.Hash, leaf []byte) []byte { - s.Reset() - s.Write(leafPrefix) - s.Write(leaf) - return s.Sum(nil) -} - -func innerHashOpt(s hash.Hash, left, right []byte) []byte { - s.Reset() - s.Write(innerPrefix) - s.Write(left) - s.Write(right) - return s.Sum(nil) -} - -// returns tmhash() -func emptyHash() []byte { - h := sha256.Sum256([]byte{}) - return h[:] -} - -// getSplitPoint returns the largest power of 2 less than length -func getSplitPoint(length int64) int64 { - if length < 1 { - panic("Trying to split a tree with size < 1") - } - uLength := uint(length) - bitlen := bits.Len(uLength) - k := int64(1 << uint(bitlen-1)) - if k == length { - k >>= 1 - } - return k -} diff --git a/store/listenkv/store.go b/store/listenkv/store.go deleted file mode 100644 index b08a6e3950..0000000000 --- a/store/listenkv/store.go +++ /dev/null @@ -1,142 +0,0 @@ -package listenkv - -import ( - "io" - - "cosmossdk.io/store/types" -) - -var _ types.KVStore = &Store{} - -// Store implements the KVStore interface with listening enabled. -// Writes are recorded on each core KVStore call and forwarded to the -// underlying listener with the proper key and operation. -type Store struct { - parent types.KVStore - listener *types.MemoryListener - parentStoreKey types.StoreKey -} - -// NewStore returns a reference to a new listenkv.Store given a parent -// KVStore implementation and a MemoryListener. -func NewStore(parent types.KVStore, parentStoreKey types.StoreKey, listener *types.MemoryListener) *Store { - return &Store{parent: parent, listener: listener, parentStoreKey: parentStoreKey} -} - -// Get implements the KVStore interface. It delegates the Get call to the -// parent KVStore; reads are not recorded. -func (s *Store) Get(key []byte) []byte { - value := s.parent.Get(key) - return value -} - -// Set implements the KVStore interface. It records the write with the -// listener and delegates the Set call to the parent KVStore. -func (s *Store) Set(key, value []byte) { - types.AssertValidKey(key) - s.parent.Set(key, value) - s.listener.OnWrite(s.parentStoreKey, key, value, false) -} - -// Delete implements the KVStore interface. It records the deletion with the -// listener and delegates the Delete call to the parent KVStore. -func (s *Store) Delete(key []byte) { - s.parent.Delete(key) - s.listener.OnWrite(s.parentStoreKey, key, nil, true) -} - -// Has implements the KVStore interface. It delegates the Has call to the -// parent KVStore. -func (s *Store) Has(key []byte) bool { - return s.parent.Has(key) -} - -// Iterator implements the KVStore interface. It delegates the Iterator call -// to the parent KVStore. -func (s *Store) Iterator(start, end []byte) types.Iterator { - return s.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It delegates the -// ReverseIterator call to the parent KVStore. -func (s *Store) ReverseIterator(start, end []byte) types.Iterator { - return s.iterator(start, end, false) -} - -// iterator facilitates iteration over a KVStore.
It delegates the necessary -// calls to its parent KVStore. -func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator - - if ascending { - parent = s.parent.Iterator(start, end) - } else { - parent = s.parent.ReverseIterator(start, end) - } - - return newTraceIterator(parent, s.listener) -} - -type listenIterator struct { - parent types.Iterator - listener *types.MemoryListener -} - -func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) types.Iterator { - return &listenIterator{parent: parent, listener: listener} -} - -// Domain implements the Iterator interface. -func (li *listenIterator) Domain() (start, end []byte) { - return li.parent.Domain() -} - -// Valid implements the Iterator interface. -func (li *listenIterator) Valid() bool { - return li.parent.Valid() -} - -// Next implements the Iterator interface. -func (li *listenIterator) Next() { - li.parent.Next() -} - -// Key implements the Iterator interface. -func (li *listenIterator) Key() []byte { - key := li.parent.Key() - return key -} - -// Value implements the Iterator interface. -func (li *listenIterator) Value() []byte { - value := li.parent.Value() - return value -} - -// Close implements the Iterator interface. -func (li *listenIterator) Close() error { - return li.parent.Close() -} - -// Error delegates the Error call to the parent iterator. -func (li *listenIterator) Error() error { - return li.parent.Error() -} - -// GetStoreType implements the KVStore interface. It returns the underlying -// KVStore type. -func (s *Store) GetStoreType() types.StoreType { - return s.parent.GetStoreType() -} - -// CacheWrap implements the KVStore interface. It panics as a Store -// cannot be cache wrapped. -func (s *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a ListenKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be cache wrapped.
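Putting the decorator together, a short usage sketch that mirrors the tests further down; parent is assumed to be any types.KVStore already in scope, and the "bank" key name is illustrative:

listener := types.NewMemoryListener()
store := listenkv.NewStore(parent, types.NewKVStoreKey("bank"), listener)

store.Set([]byte("k"), []byte("v")) // forwarded to parent and recorded
store.Delete([]byte("k"))           // recorded with Delete: true

pairs := listener.PopStateCache() // two *types.StoreKVPair entries, in write order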
-func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a ListenKVStore") -} diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go deleted file mode 100644 index 51b88912c2..0000000000 --- a/store/listenkv/store_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package listenkv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/listenkv" - "cosmossdk.io/store/prefix" - "cosmossdk.io/store/types" -) - -func bz(s string) []byte { return []byte(s) } - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -var kvPairs = []kv.Pair{ - {Key: keyFmt(1), Value: valFmt(1)}, - {Key: keyFmt(2), Value: valFmt(2)}, - {Key: keyFmt(3), Value: valFmt(3)}, -} - -var testStoreKey = types.NewKVStoreKey("listen_test") - -func newListenKVStore(listener *types.MemoryListener) *listenkv.Store { - store := newEmptyListenKVStore(listener) - - for _, kvPair := range kvPairs { - store.Set(kvPair.Key, kvPair.Value) - } - - return store -} - -func newEmptyListenKVStore(listener *types.MemoryListener) *listenkv.Store { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - - return listenkv.NewStore(memDB, testStoreKey, listener) -} - -func TestListenKVStoreGet(t *testing.T) { - testCases := []struct { - key []byte - expectedValue []byte - }{ - { - key: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - { - key: []byte("does-not-exist"), - expectedValue: nil, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - value := store.Get(tc.key) - - require.Equal(t, tc.expectedValue, value) - } -} - -func TestListenKVStoreSet(t *testing.T) { - testCases := []struct { - key []byte - value []byte - expectedOut *types.StoreKVPair - }{ - { - key: kvPairs[0].Key, - value: kvPairs[0].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[0].Key, - Value: kvPairs[0].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - { - key: kvPairs[1].Key, - value: kvPairs[1].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[1].Key, - Value: kvPairs[1].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - { - key: kvPairs[2].Key, - value: kvPairs[2].Value, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[2].Key, - Value: kvPairs[2].Value, - StoreKey: testStoreKey.Name(), - Delete: false, - }, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newEmptyListenKVStore(listener) - store.Set(tc.key, tc.value) - storeKVPair := listener.PopStateCache()[0] - - require.Equal(t, tc.expectedOut, storeKVPair) - } - - listener := types.NewMemoryListener() - store := newEmptyListenKVStore(listener) - require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") -} - -func TestListenKVStoreDelete(t *testing.T) { - testCases := []struct { - key []byte - expectedOut *types.StoreKVPair - }{ - { - key: kvPairs[0].Key, - expectedOut: &types.StoreKVPair{ - Key: kvPairs[0].Key, - Value: nil, - StoreKey: testStoreKey.Name(), - Delete: true, - }, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := 
newListenKVStore(listener) - store.Delete(tc.key) - cache := listener.PopStateCache() - require.NotEmpty(t, cache) - storeKVPair := cache[len(cache)-1] - - require.Equal(t, tc.expectedOut, storeKVPair) - } -} - -func TestListenKVStoreHas(t *testing.T) { - testCases := []struct { - key []byte - expected bool - }{ - { - key: kvPairs[0].Key, - expected: true, - }, - } - - for _, tc := range testCases { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - ok := store.Has(tc.key) - - require.Equal(t, tc.expected, ok) - } -} - -func TestTestListenKVStoreIterator(t *testing.T) { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - iterator := store.Iterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - }{ - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - }, - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - }, - } - - for _, tc := range testCases { - ka := iterator.Key() - require.Equal(t, tc.expectedKey, ka) - - va := iterator.Value() - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestTestListenKVStoreReverseIterator(t *testing.T) { - listener := types.NewMemoryListener() - - store := newListenKVStore(listener) - iterator := store.ReverseIterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - }{ - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - }, - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - }, - } - - for _, tc := range testCases { - ka := iterator.Key() - require.Equal(t, tc.expectedKey, ka) - - va := iterator.Value() - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestListenKVStorePrefix(t *testing.T) { - store := newEmptyListenKVStore(nil) - pStore := prefix.NewStore(store, []byte("listen_prefix")) - require.IsType(t, prefix.Store{}, pStore) -} - -func TestListenKVStoreGetStoreType(t *testing.T) { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - store := newEmptyListenKVStore(nil) - require.Equal(t, memDB.GetStoreType(), store.GetStoreType()) -} - -func TestListenKVStoreCacheWrap(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) -} diff --git a/store/mem/mem_test.go b/store/mem/mem_test.go deleted file mode 100644 index 6595b45dce..0000000000 --- a/store/mem/mem_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package mem_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/mem" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -func TestStore(t *testing.T) { - db := mem.NewStore() - require.Equal(t, types.StoreTypeMemory, 
db.GetStoreType()) - - key, value := []byte("key"), []byte("value") - - require.Nil(t, db.Get(key)) - db.Set(key, value) - require.Equal(t, value, db.Get(key)) - - newValue := []byte("newValue") - db.Set(key, newValue) - require.Equal(t, newValue, db.Get(key)) - - db.Delete(key) - require.Nil(t, db.Get(key)) - - cacheWrapper := db.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} - -func TestCommit(t *testing.T) { - db := mem.NewStore() - key, value := []byte("key"), []byte("value") - - db.Set(key, value) - id := db.Commit() - require.True(t, id.IsZero()) - require.True(t, db.LastCommitID().IsZero()) - require.Equal(t, value, db.Get(key)) -} - -func TestStorePruningOptions(t *testing.T) { - // this is a no-op - db := mem.NewStore() - require.Equal(t, pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined), db.GetPruning()) -} diff --git a/store/mem/store.go b/store/mem/store.go deleted file mode 100644 index b819d75363..0000000000 --- a/store/mem/store.go +++ /dev/null @@ -1,62 +0,0 @@ -package mem - -import ( - "io" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -var ( - _ types.KVStore = (*Store)(nil) - _ types.Committer = (*Store)(nil) -) - -// Store implements an in-memory-only KVStore. Entries are persisted between -// commits and thus between blocks. State in a memory store is not committed as part of app state, but is maintained privately by each node. -type Store struct { - dbadapter.Store -} - -func NewStore() *Store { - return NewStoreWithDB(dbm.NewMemDB()) -} - -func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer // Concrete return type is fine here. - return &Store{Store: dbadapter.Store{DB: db}} -} - -// GetStoreType returns the Store's type. -func (s Store) GetStoreType() types.StoreType { - return types.StoreTypeMemory -} - -// CacheWrap branches the underlying store. -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) -} - -// CacheWrapWithTrace implements KVStore. -func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) -} - -// Commit performs a no-op as entries are persistent between commits. -func (s *Store) Commit() (id types.CommitID) { return } - -func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. -// They must be set on the root commit multi-store.
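[Editor's note, not part of the diff: the mem store being removed above has a subtle contract worth spelling out — writes survive Commit, but Commit itself is a no-op and never contributes to app state. A minimal sketch mirroring the removed TestCommit:]

package main

import (
	"fmt"

	"cosmossdk.io/store/mem"
)

func main() {
	store := mem.NewStore()
	store.Set([]byte("k"), []byte("v"))

	id := store.Commit()                        // no-op: entries persist between commits
	fmt.Println(id.IsZero())                    // true: nothing is committed to app state
	fmt.Println(store.LastCommitID().IsZero())  // true
	fmt.Println(string(store.Get([]byte("k")))) // "v": the write survived the commit
}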
-func (s *Store) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} - -func (s Store) LastCommitID() (id types.CommitID) { return } - -func (s Store) WorkingHash() (hash []byte) { return } diff --git a/store/metrics/telemetry.go b/store/metrics/telemetry.go deleted file mode 100644 index d5bc55c45b..0000000000 --- a/store/metrics/telemetry.go +++ /dev/null @@ -1,56 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/hashicorp/go-metrics" -) - -// StoreMetrics defines the set of metrics for the store package -type StoreMetrics interface { - MeasureSince(keys ...string) -} - -var ( - _ StoreMetrics = Metrics{} - _ StoreMetrics = NoOpMetrics{} -) - -// Metrics defines the metrics wrapper for the store package -type Metrics struct { - Labels []metrics.Label -} - -// NewMetrics returns a new instance of the Metrics with labels set by the node operator -func NewMetrics(labels [][]string) Metrics { - gatherer := Metrics{} - - if numGlobalLables := len(labels); numGlobalLables > 0 { - parsedGlobalLabels := make([]metrics.Label, numGlobalLables) - for i, gl := range labels { - parsedGlobalLabels[i] = metrics.Label{Name: gl[0], Value: gl[1]} - } - - gatherer.Labels = parsedGlobalLabels - } - - return gatherer -} - -// MeasureSince provides a wrapper functionality for emitting a time measure -// metric with global labels (if any). -func (m Metrics) MeasureSince(keys ...string) { - start := time.Now() - metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels) -} - -// NoOpMetrics is a no-op implementation of the StoreMetrics interface -type NoOpMetrics struct{} - -// NewNoOpMetrics returns a new instance of the NoOpMetrics -func NewNoOpMetrics() NoOpMetrics { - return NoOpMetrics{} -} - -// MeasureSince is a no-op implementation of the StoreMetrics interface to avoid time.Now() calls -func (m NoOpMetrics) MeasureSince(keys ...string) {} diff --git a/store/mock/cosmos_cosmos_db_DB.go b/store/mock/cosmos_cosmos_db_DB.go deleted file mode 100644 index 4a79ee7956..0000000000 --- a/store/mock/cosmos_cosmos_db_DB.go +++ /dev/null @@ -1,221 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/cosmos/cosmos-db (interfaces: DB) - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - db "github.com/cosmos/cosmos-db" - gomock "github.com/golang/mock/gomock" -) - -// MockDB is a mock of DB interface. -type MockDB struct { - ctrl *gomock.Controller - recorder *MockDBMockRecorder -} - -// MockDBMockRecorder is the mock recorder for MockDB. -type MockDBMockRecorder struct { - mock *MockDB -} - -// NewMockDB creates a new mock instance. -func NewMockDB(ctrl *gomock.Controller) *MockDB { - mock := &MockDB{ctrl: ctrl} - mock.recorder = &MockDBMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDB) EXPECT() *MockDBMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockDB) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockDBMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close)) -} - -// Delete mocks base method. 
-func (m *MockDB) Delete(arg0 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0) -} - -// DeleteSync mocks base method. -func (m *MockDB) DeleteSync(arg0 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteSync", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteSync indicates an expected call of DeleteSync. -func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0) -} - -// Get mocks base method. -func (m *MockDB) Get(arg0 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0) -} - -// Has mocks base method. -func (m *MockDB) Has(arg0 []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Has", arg0) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Has indicates an expected call of Has. -func (mr *MockDBMockRecorder) Has(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), arg0) -} - -// Iterator mocks base method. -func (m *MockDB) Iterator(arg0, arg1 []byte) (db.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Iterator", arg0, arg1) - ret0, _ := ret[0].(db.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Iterator indicates an expected call of Iterator. -func (mr *MockDBMockRecorder) Iterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), arg0, arg1) -} - -// NewBatch mocks base method. -func (m *MockDB) NewBatch() db.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatch") - ret0, _ := ret[0].(db.Batch) - return ret0 -} - -// NewBatch indicates an expected call of NewBatch. -func (mr *MockDBMockRecorder) NewBatch() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch)) -} - -// NewBatchWithSize mocks base method. -func (m *MockDB) NewBatchWithSize(arg0 int) db.Batch { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewBatchWithSize", arg0) - ret0, _ := ret[0].(db.Batch) - return ret0 -} - -// NewBatchWithSize indicates an expected call of NewBatchWithSize. -func (mr *MockDBMockRecorder) NewBatchWithSize(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatchWithSize", reflect.TypeOf((*MockDB)(nil).NewBatchWithSize), arg0) -} - -// Print mocks base method. -func (m *MockDB) Print() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Print") - ret0, _ := ret[0].(error) - return ret0 -} - -// Print indicates an expected call of Print. 
-func (mr *MockDBMockRecorder) Print() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print)) -} - -// ReverseIterator mocks base method. -func (m *MockDB) ReverseIterator(arg0, arg1 []byte) (db.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReverseIterator", arg0, arg1) - ret0, _ := ret[0].(db.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReverseIterator indicates an expected call of ReverseIterator. -func (mr *MockDBMockRecorder) ReverseIterator(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), arg0, arg1) -} - -// Set mocks base method. -func (m *MockDB) Set(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Set", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Set indicates an expected call of Set. -func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1) -} - -// SetSync mocks base method. -func (m *MockDB) SetSync(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetSync", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetSync indicates an expected call of SetSync. -func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1) -} - -// Stats mocks base method. -func (m *MockDB) Stats() map[string]string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stats") - ret0, _ := ret[0].(map[string]string) - return ret0 -} - -// Stats indicates an expected call of Stats. -func (mr *MockDBMockRecorder) Stats() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats)) -} diff --git a/store/prefix/store.go b/store/prefix/store.go deleted file mode 100644 index 26b8b0344a..0000000000 --- a/store/prefix/store.go +++ /dev/null @@ -1,207 +0,0 @@ -package prefix - -import ( - "bytes" - "errors" - "io" - - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -var _ types.KVStore = Store{} - -// Store is similar to cometbft/cometbft/libs/db/prefix_db; -// both give access only to a limited subset of the store, -// for convenience or safety -type Store struct { - parent types.KVStore - prefix []byte -} - -func NewStore(parent types.KVStore, prefix []byte) Store { - return Store{ - parent: parent, - prefix: prefix, - } -} - -func cloneAppend(bz, tail []byte) (res []byte) { - res = make([]byte, len(bz)+len(tail)) - copy(res, bz) - copy(res[len(bz):], tail) - return -} - -func (s Store) key(key []byte) (res []byte) { - if key == nil { - panic("nil key on Store") - } - res = cloneAppend(s.prefix, key) - return -} - -// GetStoreType implements Store -func (s Store) GetStoreType() types.StoreType { - return s.parent.GetStoreType() -} - -// CacheWrap implements CacheWrap -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) -} - -// CacheWrapWithTrace implements the KVStore interface.
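[Editor's note, not part of the diff: a minimal sketch of how the removed prefix store namespaces keys under its parent store, following the behavior exercised by the removed TestPrefixDBSimple:]

package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/dbadapter"
	"cosmossdk.io/store/prefix"
)

func main() {
	base := dbadapter.Store{DB: dbm.NewMemDB()}
	pstore := prefix.NewStore(base, []byte("key"))

	pstore.Set([]byte("1"), []byte("value1"))     // lands in the parent as "key1"
	fmt.Println(string(base.Get([]byte("key1")))) // "value1"
	fmt.Println(base.Get([]byte("1")) == nil)     // true: only the prefixed key exists
}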
-func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) -} - -// Get implements KVStore -func (s Store) Get(key []byte) []byte { - res := s.parent.Get(s.key(key)) - return res -} - -// Has implements KVStore -func (s Store) Has(key []byte) bool { - return s.parent.Has(s.key(key)) -} - -// Set implements KVStore -func (s Store) Set(key, value []byte) { - types.AssertValidKey(key) - types.AssertValidValue(value) - s.parent.Set(s.key(key), value) -} - -// Delete implements KVStore -func (s Store) Delete(key []byte) { - s.parent.Delete(s.key(key)) -} - -// Iterator implements KVStore -// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106 -func (s Store) Iterator(start, end []byte) types.Iterator { - newstart := cloneAppend(s.prefix, start) - - var newend []byte - if end == nil { - newend = cpIncr(s.prefix) - } else { - newend = cloneAppend(s.prefix, end) - } - - iter := s.parent.Iterator(newstart, newend) - - return newPrefixIterator(s.prefix, start, end, iter) -} - -// ReverseIterator implements KVStore -// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129 -func (s Store) ReverseIterator(start, end []byte) types.Iterator { - newstart := cloneAppend(s.prefix, start) - - var newend []byte - if end == nil { - newend = cpIncr(s.prefix) - } else { - newend = cloneAppend(s.prefix, end) - } - - iter := s.parent.ReverseIterator(newstart, newend) - - return newPrefixIterator(s.prefix, start, end, iter) -} - -var _ types.Iterator = (*prefixIterator)(nil) - -type prefixIterator struct { - prefix []byte - start []byte - end []byte - iter types.Iterator - valid bool -} - -func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator { - return &prefixIterator{ - prefix: prefix, - start: start, - end: end, - iter: parent, - valid: parent.Valid() && bytes.HasPrefix(parent.Key(), prefix), - } -} - -// Domain implements Iterator -func (pi *prefixIterator) Domain() ([]byte, []byte) { - return pi.start, pi.end -} - -// Valid implements Iterator -func (pi *prefixIterator) Valid() bool { - return pi.valid && pi.iter.Valid() -} - -// Next implements Iterator -func (pi *prefixIterator) Next() { - if !pi.valid { - panic("prefixIterator invalid, cannot call Next()") - } - - if pi.iter.Next(); !pi.iter.Valid() || !bytes.HasPrefix(pi.iter.Key(), pi.prefix) { - // TODO: shouldn't pi be set to nil instead? - pi.valid = false - } -} - -// Key implements Iterator -func (pi *prefixIterator) Key() (key []byte) { - if !pi.valid { - panic("prefixIterator invalid, cannot call Key()") - } - - key = pi.iter.Key() - key = stripPrefix(key, pi.prefix) - - return -} - -// Value implements Iterator -func (pi *prefixIterator) Value() []byte { - if !pi.valid { - panic("prefixIterator invalid, cannot call Value()") - } - - return pi.iter.Value() -} - -// Close implements Iterator -func (pi *prefixIterator) Close() error { - return pi.iter.Close() -} - -// Error returns an error if the prefixIterator is invalid defined by the Valid -// method. 
-func (pi *prefixIterator) Error() error { - if !pi.Valid() { - return errors.New("invalid prefixIterator") - } - - return nil -} - -// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go -func stripPrefix(key, prefix []byte) []byte { - if len(key) < len(prefix) || !bytes.Equal(key[:len(prefix)], prefix) { - panic("should not happen") - } - - return key[len(prefix):] -} - -// wrapping types.PrefixEndBytes -func cpIncr(bz []byte) []byte { - return types.PrefixEndBytes(bz) -} diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go deleted file mode 100644 index 01022aa7d2..0000000000 --- a/store/prefix/store_test.go +++ /dev/null @@ -1,451 +0,0 @@ -package prefix - -import ( - "crypto/rand" - "testing" - - dbm "github.com/cosmos/cosmos-db" - tiavl "github.com/cosmos/iavl" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/gaskv" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/types" - "cosmossdk.io/store/wrapper" -) - -// copied from iavl/store_test.go -var ( - cacheSize = 100 -) - -func bz(s string) []byte { return []byte(s) } - -type kvpair struct { - key []byte - value []byte -} - -func genRandomKVPairs(t *testing.T) []kvpair { - t.Helper() - kvps := make([]kvpair, 20) - - for i := 0; i < 20; i++ { - kvps[i].key = make([]byte, 32) - _, err := rand.Read(kvps[i].key) - require.NoError(t, err) - kvps[i].value = make([]byte, 32) - _, err = rand.Read(kvps[i].value) - require.NoError(t, err) - } - - return kvps -} - -func setRandomKVPairs(t *testing.T, store types.KVStore) []kvpair { - t.Helper() - kvps := genRandomKVPairs(t) - for _, kvp := range kvps { - store.Set(kvp.key, kvp.value) - } - return kvps -} - -func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) { - t.Helper() - prefixStore := NewStore(baseStore, prefix) - prefixPrefixStore := NewStore(prefixStore, []byte("prefix")) - - require.Panics(t, func() { prefixStore.Get(nil) }) - require.Panics(t, func() { prefixStore.Set(nil, []byte{}) }) - - kvps := setRandomKVPairs(t, prefixPrefixStore) - - for i := 0; i < 20; i++ { - key := kvps[i].key - value := kvps[i].value - require.True(t, prefixPrefixStore.Has(key)) - require.Equal(t, value, prefixPrefixStore.Get(key)) - - key = append([]byte("prefix"), key...) - require.True(t, prefixStore.Has(key)) - require.Equal(t, value, prefixStore.Get(key)) - key = append(prefix, key...) - require.True(t, baseStore.Has(key)) - require.Equal(t, value, baseStore.Get(key)) - - key = kvps[i].key - prefixPrefixStore.Delete(key) - require.False(t, prefixPrefixStore.Has(key)) - require.Nil(t, prefixPrefixStore.Get(key)) - key = append([]byte("prefix"), key...) - require.False(t, prefixStore.Has(key)) - require.Nil(t, prefixStore.Get(key)) - key = append(prefix, key...) 
- require.False(t, baseStore.Has(key)) - require.Nil(t, baseStore.Get(key)) - } -} - -func TestIAVLStorePrefix(t *testing.T) { - db := wrapper.NewDBWrapper(dbm.NewMemDB()) - tree := tiavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger()) - iavlStore := iavl.UnsafeNewStore(tree) - - testPrefixStore(t, iavlStore, []byte("test")) -} - -func TestPrefixKVStoreNoNilSet(t *testing.T) { - meter := types.NewGasMeter(100000000) - mem := dbadapter.Store{DB: dbm.NewMemDB()} - gasStore := gaskv.NewStore(mem, meter, types.KVGasConfig()) - require.Panics(t, func() { gasStore.Set([]byte("key"), nil) }, "setting a nil value should panic") -} - -func TestPrefixStoreIterate(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - prefix := []byte("test") - prefixStore := NewStore(baseStore, prefix) - - setRandomKVPairs(t, prefixStore) - - bIter := types.KVStorePrefixIterator(baseStore, prefix) - pIter := types.KVStorePrefixIterator(prefixStore, nil) - - for bIter.Valid() && pIter.Valid() { - require.Equal(t, bIter.Key(), append(prefix, pIter.Key()...)) - require.Equal(t, bIter.Value(), pIter.Value()) - - bIter.Next() - pIter.Next() - } - - bIter.Close() - pIter.Close() -} - -func incFirstByte(bz []byte) { - bz[0]++ -} - -func TestCloneAppend(t *testing.T) { - kvps := genRandomKVPairs(t) - for _, kvp := range kvps { - bz := cloneAppend(kvp.key, kvp.value) - require.Equal(t, bz, append(kvp.key, kvp.value...)) - - incFirstByte(bz) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - - bz = cloneAppend(kvp.key, kvp.value) - incFirstByte(kvp.key) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - - bz = cloneAppend(kvp.key, kvp.value) - incFirstByte(kvp.value) - require.NotEqual(t, bz, append(kvp.key, kvp.value...)) - } -} - -func TestPrefixStoreIteratorEdgeCase(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - - // overflow in cpIncr - prefix := []byte{0xAA, 0xFF, 0xFF} - prefixStore := NewStore(baseStore, prefix) - - // ascending order - baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) - - iter := prefixStore.Iterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() -} - -func TestPrefixStoreReverseIteratorEdgeCase(t *testing.T) { - db := dbm.NewMemDB() - baseStore := dbadapter.Store{DB: db} - - // overflow in cpIncr - prefix := []byte{0xAA, 0xFF, 0xFF} - prefixStore := NewStore(baseStore, prefix) - - // descending order - baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{}) - - iter := prefixStore.ReverseIterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() - - db = dbm.NewMemDB() - 
baseStore = dbadapter.Store{DB: db} - - // underflow in cpDecr - prefix = []byte{0xAA, 0x00, 0x00} - prefixStore = NewStore(baseStore, prefix) - - baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00}, []byte{}) - baseStore.Set([]byte{0xAB, 0x00, 0x01}, []byte{}) - baseStore.Set([]byte{0xAA, 0x00, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xAA, 0x00, 0x00}, []byte{}) - baseStore.Set([]byte{0xA9, 0xFF, 0xFF, 0x00}, []byte{}) - baseStore.Set([]byte{0xA9, 0xFF, 0xFF}, []byte{}) - - iter = prefixStore.ReverseIterator(nil, nil) - - checkDomain(t, iter, nil, nil) - checkItem(t, iter, []byte{0x00}, bz("")) - checkNext(t, iter, true) - checkItem(t, iter, []byte{}, bz("")) - checkNext(t, iter, false) - - checkInvalid(t, iter) - - iter.Close() -} - -// Tests below are ported from https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db_test.go - -func mockStoreWithStuff() types.KVStore { - db := dbm.NewMemDB() - store := dbadapter.Store{DB: db} - // Under "key" prefix - store.Set(bz("key"), bz("value")) - store.Set(bz("key1"), bz("value1")) - store.Set(bz("key2"), bz("value2")) - store.Set(bz("key3"), bz("value3")) - store.Set(bz("something"), bz("else")) - store.Set(bz("k"), bz("val")) - store.Set(bz("ke"), bz("value")) - store.Set(bz("kee"), bz("valuu")) - return store -} - -func checkValue(t *testing.T, store types.KVStore, key, expected []byte) { - t.Helper() - bz := store.Get(key) - require.Equal(t, expected, bz) -} - -func checkValid(t *testing.T, itr types.Iterator, expected bool) { - t.Helper() - valid := itr.Valid() - require.Equal(t, expected, valid) -} - -func checkNext(t *testing.T, itr types.Iterator, expected bool) { - t.Helper() - itr.Next() - valid := itr.Valid() - require.Equal(t, expected, valid) -} - -func checkDomain(t *testing.T, itr types.Iterator, start, end []byte) { - t.Helper() - ds, de := itr.Domain() - require.Equal(t, start, ds) - require.Equal(t, end, de) -} - -func checkItem(t *testing.T, itr types.Iterator, key, value []byte) { - t.Helper() - require.Exactly(t, key, itr.Key()) - require.Exactly(t, value, itr.Value()) -} - -func checkInvalid(t *testing.T, itr types.Iterator) { - t.Helper() - checkValid(t, itr, false) - checkKeyPanics(t, itr) - checkValuePanics(t, itr) - checkNextPanics(t, itr) -} - -func checkKeyPanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Key() }) -} - -func checkValuePanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Value() }) -} - -func checkNextPanics(t *testing.T, itr types.Iterator) { - t.Helper() - require.Panics(t, func() { itr.Next() }) -} - -func TestPrefixDBSimple(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - checkValue(t, pstore, bz("key"), nil) - checkValue(t, pstore, bz(""), bz("value")) - checkValue(t, pstore, bz("key1"), nil) - checkValue(t, pstore, bz("1"), bz("value1")) - checkValue(t, pstore, bz("key2"), nil) - checkValue(t, pstore, bz("2"), bz("value2")) - checkValue(t, pstore, bz("key3"), nil) - checkValue(t, pstore, bz("3"), bz("value3")) - checkValue(t, pstore, bz("something"), nil) - checkValue(t, pstore, bz("k"), nil) - checkValue(t, pstore, bz("ke"), nil) - checkValue(t, pstore, bz("kee"), nil) -} - -func TestPrefixDBIterator1(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(nil, nil) - checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz(""), bz("value")) - 
checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator2(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator3(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBIterator4(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.Iterator(bz(""), bz("")) - checkDomain(t, itr, bz(""), bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator1(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(nil, nil) - checkDomain(t, itr, nil, nil) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator2(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz("3"), bz("value3")) - checkNext(t, itr, true) - checkItem(t, itr, bz("2"), bz("value2")) - checkNext(t, itr, true) - checkItem(t, itr, bz("1"), bz("value1")) - checkNext(t, itr, true) - checkItem(t, itr, bz(""), bz("value")) - checkNext(t, itr, false) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator3(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(nil, bz("")) - checkDomain(t, itr, nil, bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestPrefixDBReverseIterator4(t *testing.T) { - store := mockStoreWithStuff() - pstore := NewStore(store, bz("key")) - - itr := pstore.ReverseIterator(bz(""), bz("")) - checkInvalid(t, itr) - itr.Close() -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - store := dbadapter.Store{DB: db} - - cacheWrapper := store.CacheWrap() - require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) -} diff --git a/store/pruning/README.md b/store/pruning/README.md deleted file mode 100644 index 2548807e2a..0000000000 --- a/store/pruning/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Pruning - -## Overview - -Pruning is the mechanism for deleting old application heights from the disk. Depending on the use case, -nodes may require different pruning strategies. For example, archive nodes must keep all -the states and prune nothing. 
On the other hand, a regular validator node may want to keep only the latest 100 heights for performance reasons. - -## Strategies - -The strategies are configured in `app.toml`, with the format `pruning = "<strategy>"` where the options are: - -* `default`: only the last 362,880 states (approximately 3.5 weeks' worth of state) are kept; pruning at 10-block intervals -* `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node) -* `everything`: the 2 latest states will be kept; pruning at 10-block intervals. -* `custom`: allows pruning options to be manually specified through 'pruning-keep-recent' and 'pruning-interval' - -If no strategy is given to the BaseApp, `nothing` is selected. However, we perform validation at the CLI layer to require these to always be set in the config file. - -## Custom Pruning - -These are applied if and only if the pruning strategy is custom: - -* `pruning-keep-recent`: N means to keep all of the last N states. -* `pruning-interval`: N means to delete old states from disk every Nth block. - -## Relationship to State Sync Snapshots - -Snapshot settings are optional. However, if set, they affect how pruning is done by -persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details. diff --git a/store/pruning/export_test.go b/store/pruning/export_test.go deleted file mode 100644 index 676ff132ff..0000000000 --- a/store/pruning/export_test.go +++ /dev/null @@ -1,8 +0,0 @@ -package pruning - -var ( - PruneSnapshotHeightsKey = pruneSnapshotHeightsKey - - Int64SliceToBytes = int64SliceToBytes - LoadPruningSnapshotHeights = loadPruningSnapshotHeights -) diff --git a/store/pruning/manager.go b/store/pruning/manager.go deleted file mode 100644 index ddc9569ffe..0000000000 --- a/store/pruning/manager.go +++ /dev/null @@ -1,191 +0,0 @@ -package pruning - -import ( - "encoding/binary" - "fmt" - "sort" - "sync" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/pruning/types" - storetypes "cosmossdk.io/store/types" -) - -// Manager is an abstraction to handle the logic needed for -// determining when to prune old heights of the store -// based on the strategy described by the pruning options. -type Manager struct { - db dbm.DB - logger storetypes.Logger - opts types.PruningOptions - snapshotInterval uint64 - // Snapshots are taken in a separate goroutine from the regular execution - // and can be delivered asynchronously via HandleSnapshotHeight. - // Therefore, we sync access to pruneSnapshotHeights with this mutex. - pruneSnapshotHeightsMx sync.RWMutex - // These are the heights that are multiples of snapshotInterval and kept for state sync snapshots. - // The heights are added to be pruned when a snapshot is complete. - pruneSnapshotHeights []int64 -} - -// NegativeHeightsError is returned when a negative height is provided to the manager. -type NegativeHeightsError struct { - Height int64 -} - -var _ error = &NegativeHeightsError{} - -func (e *NegativeHeightsError) Error() string { - return fmt.Sprintf("failed to get pruned heights: %d", e.Height) -} - -var pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights") - -// NewManager returns a new Manager with the given db and logger. -// The returned manager uses a pruning strategy of "nothing" which -// keeps all heights. Users of the Manager may change the strategy -// by calling SetOptions.
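[Editor's note, not part of the diff: a minimal sketch of how the `custom` settings described in the removed README map onto the removed pruning API; assumes a cosmossdk.io/store version that still ships these packages:]

package main

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/pruning"
	pruningtypes "cosmossdk.io/store/pruning/types"
)

func main() {
	// Equivalent of pruning = "custom", pruning-keep-recent = 100,
	// pruning-interval = 10 in app.toml.
	opts := pruningtypes.NewCustomPruningOptions(100, 10)
	if err := opts.Validate(); err != nil { // rejects interval < 10 or keep-recent < 2
		panic(err)
	}

	manager := pruning.NewManager(dbm.NewMemDB(), log.NewNopLogger())
	manager.SetOptions(opts)
	_ = manager.GetOptions() // returns the custom options just set
}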
-func NewManager(db dbm.DB, logger storetypes.Logger) *Manager { - return &Manager{ - db: db, - logger: logger, - opts: types.NewPruningOptions(types.PruningNothing), - pruneSnapshotHeights: []int64{0}, - } -} - -// SetOptions sets the pruning strategy on the manager. -func (m *Manager) SetOptions(opts types.PruningOptions) { - m.opts = opts -} - -// GetOptions fetches the pruning strategy from the manager. -func (m *Manager) GetOptions() types.PruningOptions { - return m.opts -} - -// HandleSnapshotHeight persists the snapshot height to be pruned at the next appropriate -// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails. -// The input height must be greater than 0, and the pruning strategy must not be set to pruning nothing. -// If either of these conditions is not met, this function does nothing. -func (m *Manager) HandleSnapshotHeight(height int64) { - if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 { - return - } - - m.pruneSnapshotHeightsMx.Lock() - defer m.pruneSnapshotHeightsMx.Unlock() - - m.logger.Debug("HandleSnapshotHeight", "height", height) - m.pruneSnapshotHeights = append(m.pruneSnapshotHeights, height) - sort.Slice(m.pruneSnapshotHeights, func(i, j int) bool { return m.pruneSnapshotHeights[i] < m.pruneSnapshotHeights[j] }) - k := 1 - for ; k < len(m.pruneSnapshotHeights); k++ { - if m.pruneSnapshotHeights[k] != m.pruneSnapshotHeights[k-1]+int64(m.snapshotInterval) { - break - } - } - m.pruneSnapshotHeights = m.pruneSnapshotHeights[k-1:] - - // flush the updates to disk so that they are not lost if a crash happens. - if err := m.db.SetSync(pruneSnapshotHeightsKey, int64SliceToBytes(m.pruneSnapshotHeights)); err != nil { - panic(err) - } -} - -// SetSnapshotInterval sets the interval at which the snapshots are taken. -func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) { - m.snapshotInterval = snapshotInterval -} - -// GetPruningHeight returns the height up to which the manager can prune, if pruning is possible at the given height. -func (m *Manager) GetPruningHeight(height int64) int64 { - if m.opts.GetPruningStrategy() == types.PruningNothing { - return 0 - } - if m.opts.Interval <= 0 { - return 0 - } - - if height%int64(m.opts.Interval) != 0 || height <= int64(m.opts.KeepRecent) { - return 0 - } - - // Consider the snapshot height - pruneHeight := height - 1 - int64(m.opts.KeepRecent) // we should keep the current height at least - - m.pruneSnapshotHeightsMx.RLock() - defer m.pruneSnapshotHeightsMx.RUnlock() - - // snapshotInterval is zero, indicating that all heights can be pruned - if m.snapshotInterval <= 0 { - return pruneHeight - } - - if len(m.pruneSnapshotHeights) == 0 { // the length should be greater than zero - return 0 - } - - // the snapshot at `m.pruneSnapshotHeights[0]` has already been processed, - // so we can prune up to `m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1` - snHeight := m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1 - if snHeight < pruneHeight { - return snHeight - } - return pruneHeight -} - -// LoadSnapshotHeights loads the snapshot heights from the database for crash recovery.
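[Editor's note, not part of the diff: a worked example of the GetPruningHeight arithmetic above, using the custom options from the earlier sketch (keep-recent 100, interval 10); the numbers follow directly from the removed code:]

// With KeepRecent = 100, Interval = 10, and snapshots disabled (snapshotInterval == 0):
//   GetPruningHeight(500) -> 500%10 == 0 and 500 > 100, so prune up to 500 - 1 - 100 = 399
//   GetPruningHeight(501) -> 501%10 != 0, so 0 (no pruning at this height)
// With snapshotInterval = 50 and pruneSnapshotHeights[0] == 300 (snapshot not yet handled):
//   the candidate is still 399, but it is capped at 300 + 50 - 1 = 349.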
-func (m *Manager) LoadSnapshotHeights(db dbm.DB) error { - if m.opts.GetPruningStrategy() == types.PruningNothing { - return nil - } - - loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(db) - if err != nil { - return err - } - - if len(loadedPruneSnapshotHeights) > 0 { - m.pruneSnapshotHeightsMx.Lock() - defer m.pruneSnapshotHeightsMx.Unlock() - m.pruneSnapshotHeights = loadedPruneSnapshotHeights - } - - return nil -} - -func loadPruningSnapshotHeights(db dbm.DB) ([]int64, error) { - bz, err := db.Get(pruneSnapshotHeightsKey) - if err != nil { - return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err) - } - if len(bz) == 0 { - return []int64{}, nil - } - - pruneSnapshotHeights := make([]int64, len(bz)/8) - i, offset := 0, 0 - for offset < len(bz) { - h := int64(binary.BigEndian.Uint64(bz[offset : offset+8])) - if h < 0 { - return nil, &NegativeHeightsError{Height: h} - } - pruneSnapshotHeights[i] = h - i++ - offset += 8 - } - - return pruneSnapshotHeights, nil -} - -func int64SliceToBytes(slice []int64) []byte { - bz := make([]byte, 0, len(slice)*8) - for _, ph := range slice { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(ph)) - bz = append(bz, buf...) - } - return bz -} diff --git a/store/pruning/manager_test.go b/store/pruning/manager_test.go deleted file mode 100644 index 006891de85..0000000000 --- a/store/pruning/manager_test.go +++ /dev/null @@ -1,303 +0,0 @@ -package pruning_test - -import ( - "errors" - "fmt" - "testing" - - db "github.com/cosmos/cosmos-db" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/mock" - "cosmossdk.io/store/pruning" - "cosmossdk.io/store/pruning/types" -) - -const dbErr = "db error" - -func TestNewManager(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy()) -} - -func TestStrategies(t *testing.T) { - testcases := map[string]struct { - strategy types.PruningOptions - snapshotInterval uint64 - strategyToAssert types.PruningStrategy - isValid bool - }{ - "prune nothing - no snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), - strategyToAssert: types.PruningNothing, - }, - "prune nothing - snapshot": { - strategy: types.NewPruningOptions(types.PruningNothing), - strategyToAssert: types.PruningNothing, - snapshotInterval: 100, - }, - "prune default - no snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), - strategyToAssert: types.PruningDefault, - }, - "prune default - snapshot": { - strategy: types.NewPruningOptions(types.PruningDefault), - strategyToAssert: types.PruningDefault, - snapshotInterval: 100, - }, - "prune everything - no snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), - strategyToAssert: types.PruningEverything, - }, - "prune everything - snapshot": { - strategy: types.NewPruningOptions(types.PruningEverything), - strategyToAssert: types.PruningEverything, - snapshotInterval: 100, - }, - "custom 100-10-15": { - strategy: types.NewCustomPruningOptions(100, 15), - snapshotInterval: 10, - strategyToAssert: types.PruningCustom, - }, - "custom 10-10-15": { - strategy: types.NewCustomPruningOptions(10, 15), - snapshotInterval: 10, - strategyToAssert: types.PruningCustom, - }, - "custom 100-0-15": { - strategy: types.NewCustomPruningOptions(100, 15), - snapshotInterval: 0, - strategyToAssert: types.PruningCustom, - }, - } - 
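[Editor's note, not part of the diff: the persistence format removed above is worth a worked example — each pending snapshot height is appended as an 8-byte big-endian uint64:]

// int64SliceToBytes([]int64{10, 258}) yields 16 bytes:
//   00 00 00 00 00 00 00 0A   (10)
//   00 00 00 00 00 00 01 02   (258)
// loadPruningSnapshotHeights reads the value back 8 bytes at a time and
// returns NegativeHeightsError for any height that decodes as negative.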
- for name, tc := range testcases { - tc := tc // Local copy to avoid shadowing. - t.Run(name, func(t *testing.T) { - t.Parallel() - - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - - curStrategy := tc.strategy - manager.SetSnapshotInterval(tc.snapshotInterval) - - pruneStrategy := curStrategy.GetPruningStrategy() - require.Equal(t, tc.strategyToAssert, pruneStrategy) - - // Validate strategy parameters - switch pruneStrategy { - case types.PruningDefault: - require.Equal(t, uint64(362880), curStrategy.KeepRecent) - require.Equal(t, uint64(10), curStrategy.Interval) - case types.PruningNothing: - require.Equal(t, uint64(0), curStrategy.KeepRecent) - require.Equal(t, uint64(0), curStrategy.Interval) - case types.PruningEverything: - require.Equal(t, uint64(2), curStrategy.KeepRecent) - require.Equal(t, uint64(10), curStrategy.Interval) - default: - // - } - - manager.SetOptions(curStrategy) - require.Equal(t, tc.strategy, manager.GetOptions()) - - curKeepRecent := curStrategy.KeepRecent - snHeight := int64(tc.snapshotInterval - 1) - for curHeight := int64(0); curHeight < 110000; curHeight++ { - if tc.snapshotInterval != 0 { - if curHeight > int64(tc.snapshotInterval) && curHeight%int64(tc.snapshotInterval) == int64(tc.snapshotInterval)-1 { - manager.HandleSnapshotHeight(curHeight - int64(tc.snapshotInterval) + 1) - snHeight = curHeight - } - } - - pruningHeightActual := manager.GetPruningHeight(curHeight) - curHeightStr := fmt.Sprintf("height: %d", curHeight) - - switch curStrategy.GetPruningStrategy() { - case types.PruningNothing: - require.Equal(t, int64(0), pruningHeightActual, curHeightStr) - default: - if curHeight > int64(curKeepRecent) && curHeight%int64(curStrategy.Interval) == 0 { - pruningHeightExpected := curHeight - int64(curKeepRecent) - 1 - if tc.snapshotInterval > 0 && snHeight < pruningHeightExpected { - pruningHeightExpected = snHeight - } - require.Equal(t, pruningHeightExpected, pruningHeightActual, curHeightStr) - } else { - require.Equal(t, int64(0), pruningHeightActual, curHeightStr) - } - } - } - }) - } -} - -func TestPruningHeight_Inputs(t *testing.T) { - keepRecent := int64(types.NewPruningOptions(types.PruningEverything).KeepRecent) - interval := int64(types.NewPruningOptions(types.PruningEverything).Interval) - - testcases := map[string]struct { - height int64 - expectedResult int64 - strategy types.PruningStrategy - }{ - "currentHeight is negative - prune everything - invalid currentHeight": { - -1, - 0, - types.PruningEverything, - }, - "currentHeight is zero - prune everything - invalid currentHeight": { - 0, - 0, - types.PruningEverything, - }, - "currentHeight is positive but within keep recent- prune everything - not kept": { - keepRecent, - 0, - types.PruningEverything, - }, - "currentHeight is positive and equal to keep recent+1 - no kept": { - keepRecent + 1, - 0, - types.PruningEverything, - }, - "currentHeight is positive and greater than keep recent+1 but not multiple of interval - no kept": { - keepRecent + 2, - 0, - types.PruningEverything, - }, - "currentHeight is positive and greater than keep recent+1 and multiple of interval - kept": { - interval, - interval - keepRecent - 1, - types.PruningEverything, - }, - "pruning nothing, currentHeight is positive and greater than keep recent - not kept": { - interval, - 0, - types.PruningNothing, - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - 
require.NotNil(t, manager) - manager.SetOptions(types.NewPruningOptions(tc.strategy)) - - pruningHeightActual := manager.GetPruningHeight(tc.height) - require.Equal(t, tc.expectedResult, pruningHeightActual) - }) - } -} - -func TestHandleSnapshotHeight_DbErr_Panic(t *testing.T) { - ctrl := gomock.NewController(t) - - // Setup - dbMock := mock.NewMockDB(ctrl) - - dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - - manager := pruning.NewManager(dbMock, log.NewNopLogger()) - manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) - require.NotNil(t, manager) - - defer func() { - if r := recover(); r == nil { - t.Fail() - } - }() - - manager.HandleSnapshotHeight(10) -} - -func TestHandleSnapshotHeight_LoadFromDisk(t *testing.T) { - snapshotInterval := uint64(10) - - // Setup - db := db.NewMemDB() - manager := pruning.NewManager(db, log.NewNopLogger()) - require.NotNil(t, manager) - - manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) - manager.SetSnapshotInterval(snapshotInterval) - - expected := 0 - for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ { - snapshotHeightStr := fmt.Sprintf("snaphost height: %d", snapshotHeight) - if snapshotHeight > int64(snapshotInterval) && snapshotHeight%int64(snapshotInterval) == 1 { - // Test flush - manager.HandleSnapshotHeight(snapshotHeight - 1) - expected = 1 - } - - loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db) - require.NoError(t, err) - require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr) - - // Test load back - err = manager.LoadSnapshotHeights(db) - require.NoError(t, err) - - loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db) - require.NoError(t, err) - require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr) - } -} - -func TestLoadPruningSnapshotHeights(t *testing.T) { - var ( - manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - err error - ) - require.NotNil(t, manager) - - // must not be PruningNothing - manager.SetOptions(types.NewPruningOptions(types.PruningDefault)) - - testcases := map[string]struct { - getFlushedPruningSnapshotHeights func() []int64 - expectedResult error - }{ - "negative snapshotPruningHeight - error": { - getFlushedPruningSnapshotHeights: func() []int64 { - return []int64{5, -2, 3} - }, - expectedResult: &pruning.NegativeHeightsError{Height: -2}, - }, - "non-negative - success": { - getFlushedPruningSnapshotHeights: func() []int64 { - return []int64{5, 0, 3} - }, - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - db := db.NewMemDB() - - if tc.getFlushedPruningSnapshotHeights != nil { - err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.Int64SliceToBytes(tc.getFlushedPruningSnapshotHeights())) - require.NoError(t, err) - } - - err = manager.LoadSnapshotHeights(db) - require.Equal(t, tc.expectedResult, err) - }) - } -} - -func TestLoadSnapshotHeights_PruneNothing(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) - require.NotNil(t, manager) - - manager.SetOptions(types.NewPruningOptions(types.PruningNothing)) - - require.Nil(t, manager.LoadSnapshotHeights(db.NewMemDB())) -} diff --git a/store/pruning/types/options.go b/store/pruning/types/options.go deleted file mode 100644 index 229dbed984..0000000000 --- a/store/pruning/types/options.go +++ /dev/null @@ -1,130 +0,0 @@ -package types - -import ( - "errors" - "fmt" -) - -// PruningOptions defines the pruning strategy used 
when determining which -// heights are removed from disk when committing state. -type PruningOptions struct { - // KeepRecent defines how many recent heights to keep on disk. - KeepRecent uint64 - - // Interval defines when the pruned heights are removed from disk. - Interval uint64 - - // Strategy defines the kind of pruning strategy. See below for more information on each. - Strategy PruningStrategy -} - -type PruningStrategy int - -// Pruning option string constants -const ( - PruningOptionDefault = "default" - PruningOptionEverything = "everything" - PruningOptionNothing = "nothing" - PruningOptionCustom = "custom" -) - -const ( - // PruningDefault defines a pruning strategy where the last 362880 heights are - // kept, and to-be-pruned heights are pruned at every 10th height. - // The last 362880 heights are kept (approximately 3.5 weeks' worth of state) assuming the typical - // block time is 6s. If these values do not match the application's requirements, use the "custom" option. - PruningDefault PruningStrategy = iota - // PruningEverything defines a pruning strategy where all committed heights are - // deleted, storing only the current height and the last 2 states. To-be-pruned heights are - // pruned at every 10th height. - PruningEverything - // PruningNothing defines a pruning strategy where all heights are kept on disk. - // This is the only strategy where KeepEvery=1 is allowed with state-sync snapshots disabled. - PruningNothing - // PruningCustom defines a pruning strategy where the user specifies the pruning. - PruningCustom - // PruningUndefined defines an undefined pruning strategy. It is to be returned by stores that do not support pruning. - PruningUndefined -) - -const ( - pruneEverythingKeepRecent = 2 - pruneEverythingInterval = 10 -) - -var ( - ErrPruningIntervalZero = errors.New("'pruning-interval' must not be 0. If you want to disable pruning, select pruning = \"nothing\"") - ErrPruningIntervalTooSmall = fmt.Errorf("'pruning-interval' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingInterval) - ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. 
For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent) -) - -func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions { - switch pruningStrategy { - case PruningDefault: - return PruningOptions{ - KeepRecent: 362880, - Interval: 10, - Strategy: PruningDefault, - } - case PruningEverything: - return PruningOptions{ - KeepRecent: pruneEverythingKeepRecent, - Interval: pruneEverythingInterval, - Strategy: PruningEverything, - } - case PruningNothing: - return PruningOptions{ - KeepRecent: 0, - Interval: 0, - Strategy: PruningNothing, - } - default: - return PruningOptions{ - Strategy: PruningCustom, - } - } -} - -func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions { - return PruningOptions{ - KeepRecent: keepRecent, - Interval: interval, - Strategy: PruningCustom, - } -} - -func (po PruningOptions) GetPruningStrategy() PruningStrategy { - return po.Strategy -} - -func (po PruningOptions) Validate() error { - if po.Strategy == PruningNothing { - return nil - } - if po.Interval == 0 { - return ErrPruningIntervalZero - } - if po.Interval < pruneEverythingInterval { - return ErrPruningIntervalTooSmall - } - if po.KeepRecent < pruneEverythingKeepRecent { - return ErrPruningKeepRecentTooSmall - } - return nil -} - -func NewPruningOptionsFromString(strategy string) PruningOptions { - switch strategy { - case PruningOptionEverything: - return NewPruningOptions(PruningEverything) - - case PruningOptionNothing: - return NewPruningOptions(PruningNothing) - - case PruningOptionDefault: - return NewPruningOptions(PruningDefault) - - default: - return NewPruningOptions(PruningDefault) - } -} diff --git a/store/pruning/types/options_test.go b/store/pruning/types/options_test.go deleted file mode 100644 index abc6bf39e2..0000000000 --- a/store/pruning/types/options_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestPruningOptions_Validate(t *testing.T) { - testCases := []struct { - opts PruningOptions - expectErr error - }{ - {NewPruningOptions(PruningDefault), nil}, - {NewPruningOptions(PruningEverything), nil}, - {NewPruningOptions(PruningNothing), nil}, - {NewPruningOptions(PruningCustom), ErrPruningIntervalZero}, - {NewCustomPruningOptions(2, 10), nil}, - {NewCustomPruningOptions(100, 15), nil}, - {NewCustomPruningOptions(1, 10), ErrPruningKeepRecentTooSmall}, - {NewCustomPruningOptions(2, 9), ErrPruningIntervalTooSmall}, - {NewCustomPruningOptions(2, 0), ErrPruningIntervalZero}, - {NewCustomPruningOptions(2, 0), ErrPruningIntervalZero}, - } - - for _, tc := range testCases { - err := tc.opts.Validate() - require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err) - } -} - -func TestPruningOptions_GetStrategy(t *testing.T) { - testCases := []struct { - opts PruningOptions - expectedStrategy PruningStrategy - }{ - {NewPruningOptions(PruningDefault), PruningDefault}, - {NewPruningOptions(PruningEverything), PruningEverything}, - {NewPruningOptions(PruningNothing), PruningNothing}, - {NewPruningOptions(PruningCustom), PruningCustom}, - {NewCustomPruningOptions(2, 10), PruningCustom}, - } - - for _, tc := range testCases { - actualStrategy := tc.opts.GetPruningStrategy() - require.Equal(t, tc.expectedStrategy, actualStrategy) - } -} - -func TestNewPruningOptionsFromString(t *testing.T) { - testCases := []struct { - optString string - expect PruningOptions - }{ - {PruningOptionDefault, NewPruningOptions(PruningDefault)}, - 
{PruningOptionEverything, NewPruningOptions(PruningEverything)}, - {PruningOptionNothing, NewPruningOptions(PruningNothing)}, - {"invalid", NewPruningOptions(PruningDefault)}, - } - - for _, tc := range testCases { - actual := NewPruningOptionsFromString(tc.optString) - require.Equal(t, tc.expect, actual) - } -} diff --git a/store/reexport.go b/store/reexport.go deleted file mode 100644 index 9865cb9b03..0000000000 --- a/store/reexport.go +++ /dev/null @@ -1,29 +0,0 @@ -package store - -import ( - "cosmossdk.io/store/types" -) - -// Import cosmos-sdk/types/store.go for convenience. -type ( - Store = types.Store - Committer = types.Committer - CommitStore = types.CommitStore - MultiStore = types.MultiStore - CacheMultiStore = types.CacheMultiStore - CommitMultiStore = types.CommitMultiStore - KVStore = types.KVStore - Iterator = types.Iterator - CacheKVStore = types.CacheKVStore - CommitKVStore = types.CommitKVStore - CacheWrapper = types.CacheWrapper - CacheWrap = types.CacheWrap - CommitID = types.CommitID - Key = types.StoreKey - Type = types.StoreType - Queryable = types.Queryable - TraceContext = types.TraceContext - Gas = types.Gas - GasMeter = types.GasMeter - GasConfig = types.GasConfig -) diff --git a/store/rootmulti/dbadapter.go b/store/rootmulti/dbadapter.go deleted file mode 100644 index 65cd41c66a..0000000000 --- a/store/rootmulti/dbadapter.go +++ /dev/null @@ -1,49 +0,0 @@ -package rootmulti - -import ( - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -var commithash = []byte("FAKE_HASH") - -var ( - _ types.KVStore = (*commitDBStoreAdapter)(nil) - _ types.Committer = (*commitDBStoreAdapter)(nil) -) - -//---------------------------------------- -// commitDBStoreWrapper should only be used for simulation/debugging, -// as it doesn't compute any commit hash, and it cannot load older state. - -// Wrapper type for dbm.Db with implementation of KVStore -type commitDBStoreAdapter struct { - dbadapter.Store -} - -func (cdsa commitDBStoreAdapter) Commit() types.CommitID { - return types.CommitID{ - Version: -1, - Hash: commithash, - } -} - -func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID { - return types.CommitID{ - Version: -1, - Hash: commithash, - } -} - -func (cdsa commitDBStoreAdapter) WorkingHash() []byte { - return commithash -} - -func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. -// They must be set on the root commit multi-store. -func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} diff --git a/store/rootmulti/proof.go b/store/rootmulti/proof.go deleted file mode 100644 index 5f1503be4b..0000000000 --- a/store/rootmulti/proof.go +++ /dev/null @@ -1,29 +0,0 @@ -package rootmulti - -import ( - "github.com/cometbft/cometbft/crypto/merkle" - - storetypes "cosmossdk.io/store/types" -) - -// RequireProof returns whether proof is required for the subpath. -func RequireProof(subpath string) bool { - // XXX: create a better convention. - // Currently, only when query subpath is "/key", will proof be included in - // response. If there are some changes about proof building in iavlstore.go, - // we must change code here to keep consistency with iavlStore#Query. 
- return subpath == "/key" -} - -//----------------------------------------------------------------------------- - -// DefaultProofRuntime returns a new ProofRuntime with default op decoders registered. -// It registers decoders for IAVL commitment and Simple Merkle commitment proof operations. -// XXX: This should be managed by the rootMultiStore which may want to register -// more proof ops? -func DefaultProofRuntime() (prt *merkle.ProofRuntime) { - prt = merkle.NewProofRuntime() - prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder) - prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder) - return -} diff --git a/store/rootmulti/proof_test.go b/store/rootmulti/proof_test.go deleted file mode 100644 index d573937c3d..0000000000 --- a/store/rootmulti/proof_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package rootmulti - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -func TestVerifyIAVLStoreQueryProof(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - store := iStore.(*iavl.Store) - require.Nil(t, err) - store.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/key", // required path to get key/value+proof - Data: []byte("MYKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY_NOT", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE_NOT")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte(nil)) - require.NotNil(t, err) -} - -func TestVerifyMultiStoreQueryProof(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") - - store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) - require.NoError(t, store.LoadVersion(0)) - - iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) - iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/iavlStoreKey/key", // required path to get key/value+proof - Data: []byte("MYKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify proof. 
- err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.Nil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY_NOT", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "iavlStoreKey/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE_NOT")) - require.NotNil(t, err) - - // Verify (bad) proof. - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte(nil)) - require.NotNil(t, err) -} - -func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) { - // Create main tree for testing. - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - iavlStoreKey := types.NewKVStoreKey("iavlStoreKey") - - store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil) - err := store.LoadVersion(0) - require.NoError(t, err) - - iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store) - iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE")) - cid := store.Commit() // Commit with empty iavl store. - - // Get Proof - res, err := store.Query(&types.RequestQuery{ - Path: "/iavlStoreKey/key", // required path to get key/value+proof - Data: []byte("MYABSENTKEY"), - Prove: true, - }) - require.NoError(t, err) - require.NotNil(t, res.ProofOps) - - // Verify proof. - prt := DefaultProofRuntime() - err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY") - require.Nil(t, err) - - // Verify (bad) proof. - prt = DefaultProofRuntime() - err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/MYABSENTKEY") - require.NotNil(t, err) - - // Verify (bad) proof. 
- prt = DefaultProofRuntime() - err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY", []byte("")) - require.NotNil(t, err) -} diff --git a/store/rootmulti/snapshot_test.go b/store/rootmulti/snapshot_test.go deleted file mode 100644 index 635be92970..0000000000 --- a/store/rootmulti/snapshot_test.go +++ /dev/null @@ -1,321 +0,0 @@ -package rootmulti_test - -import ( - "crypto/sha256" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - "math/rand" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/rootmulti" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/types" -) - -func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *rootmulti.Store { - multiStore := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests - - keys := []*types.KVStoreKey{} - for i := uint8(0); i < stores; i++ { - key := types.NewKVStoreKey(fmt.Sprintf("store%v", i)) - multiStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - keys = append(keys, key) - } - err := multiStore.LoadLatestVersion() - if err != nil { - panic(err) - } - - for _, key := range keys { - store := multiStore.GetCommitKVStore(key).(*iavl.Store) - for i := uint64(0); i < storeKeys; i++ { - k := make([]byte, 8) - v := make([]byte, 1024) - binary.BigEndian.PutUint64(k, i) - _, err := r.Read(v) - if err != nil { - panic(err) - } - store.Set(k, v) - } - } - - multiStore.Commit() - err = multiStore.LoadLatestVersion() - if err != nil { - panic(err) - } - - return multiStore -} - -func newMultiStoreWithMixedMounts(db dbm.DB) *rootmulti.Store { - store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl3"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) - if err := store.LoadLatestVersion(); err != nil { - panic(err) - } - return store -} - -func newMultiStoreWithMixedMountsAndBasicData(db dbm.DB) *rootmulti.Store { - store := newMultiStoreWithMixedMounts(db) - store1 := store.GetStoreByName("iavl1").(types.CommitKVStore) - store2 := store.GetStoreByName("iavl2").(types.CommitKVStore) - trans1 := store.GetStoreByName("trans1").(types.KVStore) - - store1.Set([]byte("a"), []byte{1}) - store1.Set([]byte("b"), []byte{1}) - store2.Set([]byte("X"), []byte{255}) - store2.Set([]byte("A"), []byte{101}) - trans1.Set([]byte("x1"), []byte{91}) - store.Commit() - - store1.Set([]byte("b"), []byte{2}) - store1.Set([]byte("c"), []byte{3}) - store2.Set([]byte("B"), []byte{102}) - store.Commit() - - store2.Set([]byte("C"), []byte{103}) - store2.Delete([]byte("X")) - trans1.Set([]byte("x2"), []byte{92}) - store.Commit() - - return store -} - -func assertStoresEqual(t *testing.T, expect, actual types.CommitKVStore, msgAndArgs ...interface{}) { - t.Helper() - assert.Equal(t, expect.LastCommitID(), actual.LastCommitID()) - expectIter := expect.Iterator(nil, nil) - expectMap := map[string][]byte{} - for ; expectIter.Valid(); expectIter.Next() { - expectMap[string(expectIter.Key())] = 
expectIter.Value() - } - require.NoError(t, expectIter.Error()) - - actualIter := expect.Iterator(nil, nil) - actualMap := map[string][]byte{} - for ; actualIter.Valid(); actualIter.Next() { - actualMap[string(actualIter.Key())] = actualIter.Value() - } - require.NoError(t, actualIter.Error()) - - assert.Equal(t, expectMap, actualMap, msgAndArgs...) -} - -func TestMultistoreSnapshot_Checksum(t *testing.T) { - // Chunks from different nodes must fit together, so all nodes must produce identical chunks. - // This checksum test makes sure that the byte stream remains identical. If the test fails - // without having changed the data (e.g. because the Protobuf or zlib encoding changes), - // snapshottypes.CurrentFormat must be bumped. - store := newMultiStoreWithGeneratedData(dbm.NewMemDB(), 5, 10000) - version := uint64(store.LastCommitID().Version) - - testcases := []struct { - format uint32 - chunkHashes []string - }{ - {1, []string{ - "503e5b51b657055b77e88169fadae543619368744ad15f1de0736c0a20482f24", - "e1a0daaa738eeb43e778aefd2805e3dd720798288a410b06da4b8459c4d8f72e", - "aa048b4ee0f484965d7b3b06822cf0772cdcaad02f3b1b9055e69f2cb365ef3c", - "7921eaa3ed4921341e504d9308a9877986a879fe216a099c86e8db66fcba4c63", - "a4a864e6c02c9fca5837ec80dc84f650b25276ed7e4820cf7516ced9f9901b86", - "980925390cc50f14998ecb1e87de719ca9dd7e72f5fefbe445397bf670f36c31", - }}, - } - for _, tc := range testcases { - tc := tc - t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) { - ch := make(chan io.ReadCloser) - go func() { - streamWriter := snapshots.NewStreamWriter(ch) - defer streamWriter.Close() - require.NotNil(t, streamWriter) - err := store.Snapshot(version, streamWriter) - require.NoError(t, err) - }() - hashes := []string{} - hasher := sha256.New() - for chunk := range ch { - hasher.Reset() - _, err := io.Copy(hasher, chunk) - require.NoError(t, err) - hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil))) - } - assert.Equal(t, tc.chunkHashes, hashes, - "Snapshot output for format %v has changed", tc.format) - }) - } -} - -func TestMultistoreSnapshot_Errors(t *testing.T) { - store := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB()) - - testcases := map[string]struct { - height uint64 - expectType error - }{ - "0 height": {0, nil}, - "unknown height": {9, nil}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - err := store.Snapshot(tc.height, nil) - require.Error(t, err) - if tc.expectType != nil { - assert.True(t, errors.Is(err, tc.expectType)) - } - }) - } -} - -func TestMultistoreSnapshotRestore(t *testing.T) { - source := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB()) - target := newMultiStoreWithMixedMounts(dbm.NewMemDB()) - version := uint64(source.LastCommitID().Version) - require.EqualValues(t, 3, version) - dummyExtensionItem := snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_Extension{ - Extension: &snapshottypes.SnapshotExtensionMeta{ - Name: "test", - Format: 1, - }, - }, - } - - chunks := make(chan io.ReadCloser, 100) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - require.NotNil(t, streamWriter) - defer streamWriter.Close() - err := source.Snapshot(version, streamWriter) - require.NoError(t, err) - // write an extension metadata - err = streamWriter.WriteMsg(&dummyExtensionItem) - require.NoError(t, err) - }() - - streamReader, err := snapshots.NewStreamReader(chunks) - require.NoError(t, err) - nextItem, err := target.Restore(version, snapshottypes.CurrentFormat, streamReader) - 
require.NoError(t, err) - require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - - assert.Equal(t, source.LastCommitID(), target.LastCommitID()) - for _, key := range source.StoreKeysByName() { - sourceStore := source.GetStoreByName(key.Name()).(types.CommitKVStore) - targetStore := target.GetStoreByName(key.Name()).(types.CommitKVStore) - switch sourceStore.GetStoreType() { - case types.StoreTypeTransient: - assert.False(t, targetStore.Iterator(nil, nil).Valid(), - "transient store %v not empty", key.Name()) - default: - assertStoresEqual(t, sourceStore, targetStore, "store %q not equal", key.Name()) - } - } -} - -func benchmarkMultistoreSnapshot(b *testing.B, stores uint8, storeKeys uint64) { - b.Helper() - b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") - - b.ReportAllocs() - b.StopTimer() - source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) - version := source.LastCommitID().Version - require.EqualValues(b, 1, version) - b.StartTimer() - - for i := 0; i < b.N; i++ { - target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) - for _, key := range source.StoreKeysByName() { - target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - } - err := target.LoadLatestVersion() - require.NoError(b, err) - require.EqualValues(b, 0, target.LastCommitID().Version) - - chunks := make(chan io.ReadCloser) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - require.NotNil(b, streamWriter) - err := source.Snapshot(uint64(version), streamWriter) - require.NoError(b, err) - }() - for reader := range chunks { - _, err := io.Copy(io.Discard, reader) - require.NoError(b, err) - err = reader.Close() - require.NoError(b, err) - } - } -} - -func benchmarkMultistoreSnapshotRestore(b *testing.B, stores uint8, storeKeys uint64) { - b.Helper() - b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.") - - b.ReportAllocs() - b.StopTimer() - source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys) - version := uint64(source.LastCommitID().Version) - require.EqualValues(b, 1, version) - b.StartTimer() - - for i := 0; i < b.N; i++ { - target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) - for _, key := range source.StoreKeysByName() { - target.MountStoreWithDB(key, types.StoreTypeIAVL, nil) - } - err := target.LoadLatestVersion() - require.NoError(b, err) - require.EqualValues(b, 0, target.LastCommitID().Version) - - chunks := make(chan io.ReadCloser) - go func() { - writer := snapshots.NewStreamWriter(chunks) - require.NotNil(b, writer) - err := source.Snapshot(version, writer) - require.NoError(b, err) - }() - reader, err := snapshots.NewStreamReader(chunks) - require.NoError(b, err) - _, err = target.Restore(version, snapshottypes.CurrentFormat, reader) - require.NoError(b, err) - require.Equal(b, source.LastCommitID(), target.LastCommitID()) - } -} - -func BenchmarkMultistoreSnapshot100K(b *testing.B) { - benchmarkMultistoreSnapshot(b, 10, 10000) -} - -func BenchmarkMultistoreSnapshot1M(b *testing.B) { - benchmarkMultistoreSnapshot(b, 10, 100000) -} - -func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) { - benchmarkMultistoreSnapshotRestore(b, 10, 10000) -} - -func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) { - benchmarkMultistoreSnapshotRestore(b, 10, 100000) -} diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go deleted file mode 100644 
index 1226f63d1a..0000000000 --- a/store/rootmulti/store.go +++ /dev/null @@ -1,1264 +0,0 @@ -package rootmulti - -import ( - "crypto/sha256" - "errors" - "fmt" - "io" - "math" - "sort" - "strings" - "sync" - - cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" - dbm "github.com/cosmos/cosmos-db" - protoio "github.com/cosmos/gogoproto/io" - gogotypes "github.com/cosmos/gogoproto/types" - iavltree "github.com/cosmos/iavl" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/store/cachemulti" - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/listenkv" - "cosmossdk.io/store/mem" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/pruning" - pruningtypes "cosmossdk.io/store/pruning/types" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/transient" - "cosmossdk.io/store/types" -) - -const ( - latestVersionKey = "s/latest" - commitInfoKeyFmt = "s/%d" // s/<version> -) - -const iavlDisablefastNodeDefault = false - -// keysFromStoreKeyMap returns a slice of keys for the provided map lexically sorted by StoreKey.Name() -func keysFromStoreKeyMap[V any](m map[types.StoreKey]V) []types.StoreKey { - keys := make([]types.StoreKey, 0, len(m)) - for key := range m { - keys = append(keys, key) - } - sort.Slice(keys, func(i, j int) bool { - ki, kj := keys[i], keys[j] - return ki.Name() < kj.Name() - }) - return keys -} - -// Store is composed of many CommitStores. Name contrasts with -// cacheMultiStore which is used for branching other MultiStores. It implements -// the CommitMultiStore interface. -type Store struct { - db dbm.DB - logger iavltree.Logger - lastCommitInfo *types.CommitInfo - pruningManager *pruning.Manager - iavlCacheSize int - iavlDisableFastNode bool - storesParams map[types.StoreKey]storeParams - stores map[types.StoreKey]types.CommitKVStore - keysByName map[string]types.StoreKey - initialVersion int64 - removalMap map[types.StoreKey]bool - traceWriter io.Writer - traceContext types.TraceContext - traceContextMutex sync.Mutex - interBlockCache types.MultiStorePersistentCache - listeners map[types.StoreKey]*types.MemoryListener - metrics metrics.StoreMetrics - commitHeader cmtproto.Header -} - -var ( - _ types.CommitMultiStore = (*Store)(nil) - _ types.Queryable = (*Store)(nil) -) - -// NewStore returns a reference to a new Store object with the provided DB. The -// store will be created with a PruneNothing pruning strategy by default. After -// a store is created, KVStores must be mounted and finally LoadLatestVersion or -// LoadVersion must be called. -func NewStore(db dbm.DB, logger iavltree.Logger, metricGatherer metrics.StoreMetrics) *Store { - return &Store{ - db: db, - logger: logger, - iavlCacheSize: iavl.DefaultIAVLCacheSize, - iavlDisableFastNode: iavlDisablefastNodeDefault, - storesParams: make(map[types.StoreKey]storeParams), - stores: make(map[types.StoreKey]types.CommitKVStore), - keysByName: make(map[string]types.StoreKey), - listeners: make(map[types.StoreKey]*types.MemoryListener), - removalMap: make(map[types.StoreKey]bool), - pruningManager: pruning.NewManager(db, logger), - metrics: metricGatherer, - } -} - -// GetPruning fetches the pruning strategy from the root store. -func (rs *Store) GetPruning() pruningtypes.PruningOptions { - return rs.pruningManager.GetOptions() -} - -// SetPruning sets the pruning strategy on the root store and all the sub-stores.
-// Note, calling SetPruning on the root store prior to LoadVersion or -// LoadLatestVersion performs a no-op as the stores aren't mounted yet. -func (rs *Store) SetPruning(pruningOpts pruningtypes.PruningOptions) { - rs.pruningManager.SetOptions(pruningOpts) -} - -// SetMetrics sets the metrics gatherer for the store package -func (rs *Store) SetMetrics(metrics metrics.StoreMetrics) { - rs.metrics = metrics -} - -// SetSnapshotInterval sets the interval at which the snapshots are taken. -// It is used by the store to determine which heights to retain until after the snapshot is complete. -func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) { - rs.pruningManager.SetSnapshotInterval(snapshotInterval) -} - -func (rs *Store) SetIAVLCacheSize(cacheSize int) { - rs.iavlCacheSize = cacheSize -} - -func (rs *Store) SetIAVLDisableFastNode(disableFastNode bool) { - rs.iavlDisableFastNode = disableFastNode -} - -// GetStoreType implements Store. -func (rs *Store) GetStoreType() types.StoreType { - return types.StoreTypeMulti -} - -// MountStoreWithDB implements CommitMultiStore. -func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db dbm.DB) { - if key == nil { - panic("MountIAVLStore() key cannot be nil") - } - if _, ok := rs.storesParams[key]; ok { - panic(fmt.Sprintf("store duplicate store key %v", key)) - } - if _, ok := rs.keysByName[key.Name()]; ok { - panic(fmt.Sprintf("store duplicate store key name %v", key)) - } - rs.storesParams[key] = newStoreParams(key, db, typ, 0) - rs.keysByName[key.Name()] = key -} - -// GetCommitStore returns a mounted CommitStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { - return rs.GetCommitKVStore(key) -} - -// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { - // If the Store has an inter-block cache, first attempt to lookup and unwrap - // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to - // the main mapping of CommitKVStores. - if rs.interBlockCache != nil { - if store := rs.interBlockCache.Unwrap(key); store != nil { - return store - } - } - - return rs.stores[key] -} - -// StoreKeysByName returns mapping storeNames -> StoreKeys -func (rs *Store) StoreKeysByName() map[string]types.StoreKey { - return rs.keysByName -} - -// LoadLatestVersionAndUpgrade implements CommitMultiStore -func (rs *Store) LoadLatestVersionAndUpgrade(upgrades *types.StoreUpgrades) error { - ver := GetLatestVersion(rs.db) - return rs.loadVersion(ver, upgrades) -} - -// LoadVersionAndUpgrade allows us to rename substores while loading an older version -func (rs *Store) LoadVersionAndUpgrade(ver int64, upgrades *types.StoreUpgrades) error { - return rs.loadVersion(ver, upgrades) -} - -// LoadLatestVersion implements CommitMultiStore. -func (rs *Store) LoadLatestVersion() error { - ver := GetLatestVersion(rs.db) - return rs.loadVersion(ver, nil) -} - -// LoadVersion implements CommitMultiStore. 
-func (rs *Store) LoadVersion(ver int64) error { - return rs.loadVersion(ver, nil) -} - -func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { - infos := make(map[string]types.StoreInfo) - - rs.logger.Debug("loadVersion", "ver", ver) - cInfo := &types.CommitInfo{} - - // load old data if we are not version 0 - if ver != 0 { - var err error - cInfo, err = rs.GetCommitInfo(ver) - if err != nil { - return err - } - - // convert StoreInfos slice to map - for _, storeInfo := range cInfo.StoreInfos { - infos[storeInfo.Name] = storeInfo - } - } - - // load each Store (note this doesn't panic on unmounted keys now) - newStores := make(map[types.StoreKey]types.CommitKVStore) - - storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) - - for key := range rs.storesParams { - storesKeys = append(storesKeys, key) - } - - if upgrades != nil { - // deterministic iteration order for upgrades - // (as the underlying store may change and - // upgrades make store changes where the execution order may matter) - sort.Slice(storesKeys, func(i, j int) bool { - return storesKeys[i].Name() < storesKeys[j].Name() - }) - } - - for _, key := range storesKeys { - storeParams := rs.storesParams[key] - commitID := rs.getCommitID(infos, key.Name()) - rs.logger.Debug("loadVersion commitID", "key", key, "ver", ver, "hash", fmt.Sprintf("%x", commitID.Hash)) - - // If it has been added, set the initial version - if upgrades.IsAdded(key.Name()) || upgrades.RenamedFrom(key.Name()) != "" { - storeParams.initialVersion = uint64(ver) + 1 - } else if commitID.Version != ver && storeParams.typ == types.StoreTypeIAVL { - return fmt.Errorf("version of store %q mismatch root store's version; expected %d got %d; new stores should be added using StoreUpgrades", key.Name(), ver, commitID.Version) - } - - store, err := rs.loadCommitStoreFromParams(key, commitID, storeParams) - if err != nil { - return errorsmod.Wrap(err, "failed to load store") - } - - newStores[key] = store - - // If it was deleted, remove all data - if upgrades.IsDeleted(key.Name()) { - if err := deleteKVStore(store.(types.KVStore)); err != nil { - return errorsmod.Wrapf(err, "failed to delete store %s", key.Name()) - } - rs.removalMap[key] = true - } else if oldName := upgrades.RenamedFrom(key.Name()); oldName != "" { - // handle renames specially - // make an unregistered key to satisfy loadCommitStore params - oldKey := types.NewKVStoreKey(oldName) - oldParams := newStoreParams(oldKey, storeParams.db, storeParams.typ, 0) - - // load from the old name - oldStore, err := rs.loadCommitStoreFromParams(oldKey, rs.getCommitID(infos, oldName), oldParams) - if err != nil { - return errorsmod.Wrapf(err, "failed to load old store %s", oldName) - } - - // move all data - if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { - return errorsmod.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) - } - - // add the old key so its deletion is committed - newStores[oldKey] = oldStore - // this will ensure it's not perpetually stored in commitInfo - rs.removalMap[oldKey] = true - } - } - - rs.lastCommitInfo = cInfo - rs.stores = newStores - - // load any snapshot heights we missed from disk to be pruned on the next run - if err := rs.pruningManager.LoadSnapshotHeights(rs.db); err != nil { - return err - } - - return nil -} - -func (rs *Store) getCommitID(infos map[string]types.StoreInfo, name string) types.CommitID { - info, ok := infos[name] - if !ok { - return types.CommitID{} - } - - return 
info.CommitId -} - -func deleteKVStore(kv types.KVStore) error { - // Note that we cannot write while iterating, so load all keys here, delete below - var keys [][]byte - itr := kv.Iterator(nil, nil) - for itr.Valid() { - keys = append(keys, itr.Key()) - itr.Next() - } - if err := itr.Close(); err != nil { - return err - } - - for _, k := range keys { - kv.Delete(k) - } - return nil -} - -// we simulate move by a copy and delete -func moveKVStoreData(oldDB, newDB types.KVStore) error { - // we read from one and write to another - itr := oldDB.Iterator(nil, nil) - for itr.Valid() { - newDB.Set(itr.Key(), itr.Value()) - itr.Next() - } - if err := itr.Close(); err != nil { - return err - } - - // then delete the old store - return deleteKVStore(oldDB) -} - -// PruneSnapshotHeight prunes the given height according to the prune strategy. -// If the strategy is PruneNothing, this is a no-op. -// For other strategies, this height is persisted until the snapshot is operated. -func (rs *Store) PruneSnapshotHeight(height int64) { - rs.pruningManager.HandleSnapshotHeight(height) -} - -// SetInterBlockCache sets the Store's internal inter-block (persistent) cache. -// When this is defined, all CommitKVStores will be wrapped with their respective -// inter-block cache. -func (rs *Store) SetInterBlockCache(c types.MultiStorePersistentCache) { - rs.interBlockCache = c -} - -// SetTracer sets the tracer for the MultiStore that the underlying -// stores will utilize to trace operations. A MultiStore is returned. -func (rs *Store) SetTracer(w io.Writer) types.MultiStore { - rs.traceWriter = w - return rs -} - -// SetTracingContext updates the tracing context for the MultiStore by merging -// the given context with the existing context by key. Any existing keys will -// be overwritten. It is implied that the caller should update the context when -// necessary between tracing operations. It returns a modified MultiStore. -func (rs *Store) SetTracingContext(tc types.TraceContext) types.MultiStore { - rs.traceContextMutex.Lock() - defer rs.traceContextMutex.Unlock() - rs.traceContext = rs.traceContext.Merge(tc) - - return rs -} - -func (rs *Store) getTracingContext() types.TraceContext { - rs.traceContextMutex.Lock() - defer rs.traceContextMutex.Unlock() - - if rs.traceContext == nil { - return nil - } - - ctx := types.TraceContext{} - for k, v := range rs.traceContext { - ctx[k] = v - } - - return ctx -} - -// TracingEnabled returns if tracing is enabled for the MultiStore. -func (rs *Store) TracingEnabled() bool { - return rs.traceWriter != nil -} - -// AddListeners adds a listener for the KVStore belonging to the provided StoreKey -func (rs *Store) AddListeners(keys []types.StoreKey) { - for i := range keys { - listener := rs.listeners[keys[i]] - if listener == nil { - rs.listeners[keys[i]] = types.NewMemoryListener() - } - } -} - -// ListeningEnabled returns if listening is enabled for a specific KVStore -func (rs *Store) ListeningEnabled(key types.StoreKey) bool { - if ls, ok := rs.listeners[key]; ok { - return ls != nil - } - return false -} - -// PopStateCache returns the accumulated state change messages from the CommitMultiStore -// Calling PopStateCache destroys only the currently accumulated state in each listener -// not the state in the store itself. This is a mutating and destructive operation. -// This method has been synchronized. 
-func (rs *Store) PopStateCache() []*types.StoreKVPair { - var cache []*types.StoreKVPair - for key := range rs.listeners { - ls := rs.listeners[key] - if ls != nil { - cache = append(cache, ls.PopStateCache()...) - } - } - sort.SliceStable(cache, func(i, j int) bool { - return cache[i].StoreKey < cache[j].StoreKey - }) - return cache -} - -// LatestVersion returns the latest version in the store -func (rs *Store) LatestVersion() int64 { - return rs.LastCommitID().Version -} - -// LastCommitID implements Committer/CommitStore. -func (rs *Store) LastCommitID() types.CommitID { - if rs.lastCommitInfo == nil { - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - return types.CommitID{ - Version: GetLatestVersion(rs.db), - Hash: appHash, // set empty apphash to sha256([]byte{}) if info is nil - } - } - if len(rs.lastCommitInfo.CommitID().Hash) == 0 { - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - return types.CommitID{ - Version: rs.lastCommitInfo.Version, - Hash: appHash, // set empty apphash to sha256([]byte{}) if hash is nil - } - } - - return rs.lastCommitInfo.CommitID() -} - -// PausePruning temporarily pauses the pruning of all individual stores which implement -// the PausablePruner interface. -func (rs *Store) PausePruning(pause bool) { - for _, store := range rs.stores { - if pauseable, ok := store.(types.PausablePruner); ok { - pauseable.PausePruning(pause) - } - } -} - -// Commit implements Committer/CommitStore. -func (rs *Store) Commit() types.CommitID { - var previousHeight, version int64 - if rs.lastCommitInfo.GetVersion() == 0 && rs.initialVersion > 1 { - // This case means that no commit has been made in the store, we - // start from initialVersion. - version = rs.initialVersion - } else { - // This case can mean two things: - // - either there was already a previous commit in the store, in which - // case we increment the version from there, - // - or there was no previous commit, and initial version was not set, - // in which case we start at version 1. - previousHeight = rs.lastCommitInfo.GetVersion() - version = previousHeight + 1 - } - - if rs.commitHeader.Height != version { - rs.logger.Debug("commit header and version mismatch", "header_height", rs.commitHeader.Height, "version", version) - } - - func() { // ensure unpause - // set the committing flag on all stores to block the pruning - rs.PausePruning(true) - // unset the committing flag on all stores to continue the pruning - defer rs.PausePruning(false) - rs.lastCommitInfo = commitStores(version, rs.stores, rs.removalMap) - }() - - rs.lastCommitInfo.Timestamp = rs.commitHeader.Time - defer rs.flushMetadata(rs.db, version, rs.lastCommitInfo) - - // remove remnants of removed stores - for sk := range rs.removalMap { - if _, ok := rs.stores[sk]; ok { - delete(rs.stores, sk) - delete(rs.storesParams, sk) - delete(rs.keysByName, sk.Name()) - } - } - - // reset the removalMap - rs.removalMap = make(map[types.StoreKey]bool) - - if err := rs.handlePruning(version); err != nil { - rs.logger.Error( - "failed to prune store, please check your pruning configuration", - "err", err, - ) - } - - return types.CommitID{ - Version: version, - Hash: rs.lastCommitInfo.Hash(), - } -} - -// WorkingHash returns the current hash of the store. -// it will be used to get the current app hash before commit.
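The version selection at the top of Commit above reduces to a small rule. A standalone restatement for clarity (an illustrative helper, not part of the deleted file):

// nextVersion mirrors Commit's branching: a store that has never committed but
// was configured with initialVersion > 1 starts there; otherwise the next
// version is the previous height plus one, so an empty store commits version 1.
func nextVersion(lastVersion, initialVersion int64) int64 {
	if lastVersion == 0 && initialVersion > 1 {
		return initialVersion
	}
	return lastVersion + 1
}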
-func (rs *Store) WorkingHash() []byte { - storeInfos := make([]types.StoreInfo, 0, len(rs.stores)) - storeKeys := keysFromStoreKeyMap(rs.stores) - - for _, key := range storeKeys { - store := rs.stores[key] - - if store.GetStoreType() != types.StoreTypeIAVL { - continue - } - - if !rs.removalMap[key] { - si := types.StoreInfo{ - Name: key.Name(), - CommitId: types.CommitID{ - Hash: store.WorkingHash(), - }, - } - storeInfos = append(storeInfos, si) - } - } - - sort.SliceStable(storeInfos, func(i, j int) bool { - return storeInfos[i].Name < storeInfos[j].Name - }) - - return types.CommitInfo{StoreInfos: storeInfos}.Hash() -} - -// CacheWrap implements CacheWrapper/Store/CommitStore. -func (rs *Store) CacheWrap() types.CacheWrap { - return rs.CacheMultiStore().(types.CacheWrap) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return rs.CacheWrap() -} - -// CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore. -// It implements the MultiStore interface. -func (rs *Store) CacheMultiStore() types.CacheMultiStore { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range rs.stores { - store := types.KVStore(v) - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(k) { - store = listenkv.NewStore(store, k, rs.listeners[k]) - } - stores[k] = store - } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) -} - -// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it -// attempts to load stores at a given version (height). An error is returned if -// any store cannot be loaded. This should only be used for querying and -// iterating at past heights. -func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) { - cachedStores := make(map[types.StoreKey]types.CacheWrapper) - var commitInfo *types.CommitInfo - storeInfos := map[string]bool{} - for key, store := range rs.stores { - var cacheStore types.KVStore - switch store.GetStoreType() { - case types.StoreTypeIAVL: - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) - - // Attempt to lazy-load an already saved IAVL store version. If the - // version does not exist or is pruned, an error should be returned. - var err error - cacheStore, err = store.(*iavl.Store).GetImmutable(version) - // if we get an error loading a module store, we fetch the commit info of - // this version and use it to check whether the store existed at this - // version or not - if err != nil { - if commitInfo == nil { - var errCommitInfo error - commitInfo, errCommitInfo = rs.GetCommitInfo(version) - - if errCommitInfo != nil { - return nil, errCommitInfo - } - - for _, storeInfo := range commitInfo.StoreInfos { - storeInfos[storeInfo.Name] = true - } - } - - // If the store existed at this version, it means there's actually an error - // getting the root store at this version. - if storeInfos[key.Name()] { - return nil, err - } - - // If the store doesn't exist at this version, create a dummy one to prevent a - // nil pointer panic in newer query APIs.
- cacheStore = dbadapter.Store{DB: dbm.NewMemDB()} - } - - default: - cacheStore = store - } - - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(key) { - cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) - } - - cachedStores[key] = cacheStore - } - - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil -} - -// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does -// not exist, it will panic. If the Store is wrapped in an inter-block cache, it -// will be unwrapped prior to being returned. -// -// TODO: This isn't used directly upstream. Consider returning the Store as-is -// instead of unwrapping. -func (rs *Store) GetStore(key types.StoreKey) types.Store { - store := rs.GetCommitKVStore(key) - if store == nil { - panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) - } - - return store -} - -// GetKVStore returns a mounted KVStore for a given StoreKey. If tracing is -// enabled on the KVStore, a wrapped TraceKVStore will be returned with the root -// store's tracer, otherwise, the original KVStore will be returned. -// -// NOTE: The returned KVStore may be wrapped in an inter-block cache if it is -// set on the root store. -func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { - s := rs.stores[key] - if s == nil { - panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) - } - store := types.KVStore(s) - - if rs.TracingEnabled() { - store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) - } - if rs.ListeningEnabled(key) { - store = listenkv.NewStore(store, key, rs.listeners[key]) - } - - return store -} - -func (rs *Store) handlePruning(version int64) error { - pruneHeight := rs.pruningManager.GetPruningHeight(version) - rs.logger.Debug("prune start", "height", version) - defer rs.logger.Debug("prune end", "height", version) - return rs.PruneStores(pruneHeight) -} - -// PruneStores prunes all history up to the specific height of the multi store. -func (rs *Store) PruneStores(pruningHeight int64) (err error) { - if pruningHeight <= 0 { - rs.logger.Debug("pruning skipped, height is less than or equal to 0") - return nil - } - - rs.logger.Debug("pruning store", "heights", pruningHeight) - - for key, store := range rs.stores { - rs.logger.Debug("pruning store", "key", key) // Also log store.name (a private variable)? - - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - if store.GetStoreType() != types.StoreTypeIAVL { - continue - } - - store = rs.GetCommitKVStore(key) - - err := store.(*iavl.Store).DeleteVersionsTo(pruningHeight) - if err == nil { - continue - } - - if errors.Is(err, iavltree.ErrVersionDoesNotExist) { - return err - } - - rs.logger.Error("failed to prune store", "key", key, "err", err) - } - return nil -} - -// GetStoreByName performs a lookup of a StoreKey given a store name typically -// provided in a path. The StoreKey is then used to perform a lookup and return -// a Store. If the Store is wrapped in an inter-block cache, it will be unwrapped -// prior to being returned. If the StoreKey does not exist, nil is returned. 
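GetStoreByName, defined next, is the lookup the tests further down rely on. A short usage sketch, assuming a store was mounted under the name "store1" (names illustrative, not part of the deleted file):

func lookupExample(rs *Store) {
	// GetStoreByName returns nil for names that were never mounted.
	if s := rs.GetStoreByName("store1"); s != nil {
		kv := s.(types.KVStore) // KV-backed stores assert cleanly, as the tests below do
		kv.Set([]byte("k"), []byte("v"))
	}
}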
-func (rs *Store) GetStoreByName(name string) types.Store { - key := rs.keysByName[name] - if key == nil { - return nil - } - - return rs.GetCommitKVStore(key) -} - -// Query calls substore.Query with the same `req` where `req.Path` is -// modified to remove the substore prefix. -// Ie. `req.Path` here is `/<storeName>/<subpath>`, and trimmed to `/<subpath>` for the substore. -// TODO: add proof for `multistore -> substore`. -func (rs *Store) Query(req *types.RequestQuery) (*types.ResponseQuery, error) { - path := req.Path - storeName, subpath, err := parsePath(path) - if err != nil { - return &types.ResponseQuery{}, err - } - - store := rs.GetStoreByName(storeName) - if store == nil { - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "no such store: %s", storeName) - } - - queryable, ok := store.(types.Queryable) - if !ok { - return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "store %s (type %T) doesn't support queries", storeName, store) - } - - // trim the path and make the query - req.Path = subpath - res, err := queryable.Query(req) - - if !req.Prove || !RequireProof(subpath) { - return res, err - } - - if res.ProofOps == nil || len(res.ProofOps.Ops) == 0 { - return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrInvalidRequest, "proof is unexpectedly empty; ensure height has not been pruned") - } - - // If the request's height is the latest height we've committed, then utilize - // the store's lastCommitInfo as this commit info may not be flushed to disk. - // Otherwise, we query for the commit info from disk. - var commitInfo *types.CommitInfo - - if res.Height == rs.lastCommitInfo.Version { - commitInfo = rs.lastCommitInfo - } else { - commitInfo, err = rs.GetCommitInfo(res.Height) - if err != nil { - return &types.ResponseQuery{}, err - } - } - - // Restore origin path and append proof op. - res.ProofOps.Ops = append(res.ProofOps.Ops, commitInfo.ProofOp(storeName)) - - return res, nil -} - -// SetInitialVersion sets the initial version of the IAVL tree. It is used when -// starting a new chain at an arbitrary height. -func (rs *Store) SetInitialVersion(version int64) error { - rs.initialVersion = version - - // Loop through all the stores, if it's an IAVL store, then set initial - // version on it. - for key, store := range rs.stores { - if store.GetStoreType() == types.StoreTypeIAVL { - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) - store.(types.StoreWithInitialVersion).SetInitialVersion(version) - } - } - - return nil -} - -// parsePath expects a format like /<storeName>[/<subpath>] -// Must start with /, subpath may be empty -// Returns error if it doesn't start with / -func parsePath(path string) (storeName, subpath string, err error) { - if !strings.HasPrefix(path, "/") { - return storeName, subpath, errorsmod.Wrapf(types.ErrUnknownRequest, "invalid path: %s", path) - } - - paths := strings.SplitN(path[1:], "/", 2) - storeName = paths[0] - - if len(paths) == 2 { - subpath = "/" + paths[1] - } - - return storeName, subpath, nil -} - -//---------------------- Snapshotting ------------------ - -// Snapshot implements snapshottypes.Snapshotter. The snapshot output for a given format must be -// identical across nodes such that chunks from different sources fit together. If the output for a -// given format changes (at the byte level), the snapshot format must be bumped - see -// TestMultistoreSnapshot_Checksum test.
-func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error { - if height == 0 { - return errorsmod.Wrap(types.ErrLogic, "cannot snapshot height 0") - } - if height > uint64(GetLatestVersion(rs.db)) { - return errorsmod.Wrapf(types.ErrLogic, "cannot snapshot future height %v", height) - } - - // Collect stores to snapshot (only IAVL stores are supported) - type namedStore struct { - *iavl.Store - name string - } - stores := []namedStore{} - keys := keysFromStoreKeyMap(rs.stores) - for _, key := range keys { - switch store := rs.GetCommitKVStore(key).(type) { - case *iavl.Store: - stores = append(stores, namedStore{name: key.Name(), Store: store}) - case *transient.Store, *mem.Store: - // Non-persisted stores shouldn't be snapshotted - continue - default: - return errorsmod.Wrapf(types.ErrLogic, - "don't know how to snapshot store %q of type %T", key.Name(), store) - } - } - sort.Slice(stores, func(i, j int) bool { - return strings.Compare(stores[i].name, stores[j].name) == -1 - }) - - // Export each IAVL store. Stores are serialized as a stream of SnapshotItem Protobuf - // messages. The first item contains a SnapshotStore with store metadata (i.e. name), - // and the following messages contain a SnapshotNode (i.e. an ExportNode). Store changes - // are demarcated by new SnapshotStore items. - for _, store := range stores { - rs.logger.Debug("starting snapshot", "store", store.name, "height", height) - exporter, err := store.Export(int64(height)) - if err != nil { - rs.logger.Error("snapshot failed; exporter error", "store", store.name, "err", err) - return err - } - - err = func() error { - defer exporter.Close() - - err := protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_Store{ - Store: &snapshottypes.SnapshotStoreItem{ - Name: store.name, - }, - }, - }) - if err != nil { - rs.logger.Error("snapshot failed; item store write failed", "store", store.name, "err", err) - return err - } - - nodeCount := 0 - for { - node, err := exporter.Next() - if errors.Is(err, iavltree.ErrorExportDone) { - rs.logger.Debug("snapshot Done", "store", store.name, "nodeCount", nodeCount) - break - } else if err != nil { - return err - } - err = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_IAVL{ - IAVL: &snapshottypes.SnapshotIAVLItem{ - Key: node.Key, - Value: node.Value, - Height: int32(node.Height), - Version: node.Version, - }, - }, - }) - if err != nil { - return err - } - nodeCount++ - } - - return nil - }() - if err != nil { - return err - } - } - - return nil -} - -// Restore implements snapshottypes.Snapshotter. -// returns next snapshot item and error. -func (rs *Store) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshottypes.SnapshotItem, error) { - // Import nodes into stores. The first item is expected to be a SnapshotItem containing - // a SnapshotStoreItem, telling us which store to import into. The following items will contain - // SnapshotNodeItem (i.e. ExportNode) until we reach the next SnapshotStoreItem or EOF. 
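The stream that Snapshot writes and Restore consumes is a flat sequence of SnapshotItem messages. A hedged sketch of that layout, using the snapshottypes messages referenced in both functions (store names and node values illustrative):

items := []*snapshottypes.SnapshotItem{
	// A SnapshotStoreItem opens each store...
	{Item: &snapshottypes.SnapshotItem_Store{Store: &snapshottypes.SnapshotStoreItem{Name: "store1"}}},
	// ...followed by that store's exported IAVL nodes...
	{Item: &snapshottypes.SnapshotItem_IAVL{IAVL: &snapshottypes.SnapshotIAVLItem{
		Key: []byte("a"), Value: []byte{1}, Height: 0, Version: 1,
	}}},
	// ...until the next SnapshotStoreItem (or EOF, or an extension item) ends it.
	{Item: &snapshottypes.SnapshotItem_Store{Store: &snapshottypes.SnapshotStoreItem{Name: "store2"}}},
}
_ = items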
- var importer *iavltree.Importer - var snapshotItem snapshottypes.SnapshotItem -loop: - for { - snapshotItem = snapshottypes.SnapshotItem{} - err := protoReader.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") - } - - switch item := snapshotItem.Item.(type) { - case *snapshottypes.SnapshotItem_Store: - if importer != nil { - err = importer.Commit() - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed") - } - importer.Close() - } - store, ok := rs.GetStoreByName(item.Store.Name).(*iavl.Store) - if !ok || store == nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "cannot import into non-IAVL store %q", item.Store.Name) - } - importer, err = store.Import(int64(height)) - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "import failed") - } - defer importer.Close() - // Importer height must reflect the node height (which usually matches the block height, but not always) - rs.logger.Debug("restoring snapshot", "store", item.Store.Name) - - case *snapshottypes.SnapshotItem_IAVL: - if importer == nil { - rs.logger.Error("failed to restore; received IAVL node item before store item") - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(types.ErrLogic, "received IAVL node item before store item") - } - if item.IAVL.Height > math.MaxInt8 { - return snapshottypes.SnapshotItem{}, errorsmod.Wrapf(types.ErrLogic, "node height %v cannot exceed %v", - item.IAVL.Height, math.MaxInt8) - } - node := &iavltree.ExportNode{ - Key: item.IAVL.Key, - Value: item.IAVL.Value, - Height: int8(item.IAVL.Height), - Version: item.IAVL.Version, - } - // Protobuf does not differentiate between []byte{} as nil, but fortunately IAVL does - // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. - if node.Key == nil { - node.Key = []byte{} - } - if node.Height == 0 && node.Value == nil { - node.Value = []byte{} - } - err := importer.Add(node) - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL node import failed") - } - - default: - break loop - } - } - - if importer != nil { - err := importer.Commit() - if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "IAVL commit failed") - } - importer.Close() - } - - rs.flushMetadata(rs.db, int64(height), rs.buildCommitInfo(int64(height))) - return snapshotItem, rs.LoadLatestVersion() -} - -func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) { - var db dbm.DB - - if params.db != nil { - db = dbm.NewPrefixDB(params.db, []byte("s/_/")) - } else { - prefix := "s/k:" + params.key.Name() + "/" - db = dbm.NewPrefixDB(rs.db, []byte(prefix)) - } - - switch params.typ { - case types.StoreTypeMulti: - panic("recursive MultiStores not yet supported") - - case types.StoreTypeIAVL: - var store types.CommitKVStore - var err error - - if params.initialVersion == 0 { - store, err = iavl.LoadStore(db, rs.logger, key, id, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics) - } else { - store, err = iavl.LoadStoreWithInitialVersion(db, rs.logger, key, id, params.initialVersion, rs.iavlCacheSize, rs.iavlDisableFastNode, rs.metrics) - } - - if err != nil { - return nil, err - } - - if rs.interBlockCache != nil { - // Wrap and get a CommitKVStore with inter-block caching. 
Note, this should - // only wrap the primary CommitKVStore, not any store that is already - // branched as that will create unexpected behavior. - store = rs.interBlockCache.GetStoreCache(key, store) - } - - return store, err - - case types.StoreTypeDB: - return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}}, nil - - case types.StoreTypeTransient: - _, ok := key.(*types.TransientStoreKey) - if !ok { - return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String()) - } - - return transient.NewStore(), nil - - case types.StoreTypeMemory: - if _, ok := key.(*types.MemoryStoreKey); !ok { - return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String()) - } - - return mem.NewStore(), nil - - default: - panic(fmt.Sprintf("unrecognized store type %v", params.typ)) - } -} - -func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo { - keys := keysFromStoreKeyMap(rs.stores) - storeInfos := []types.StoreInfo{} - for _, key := range keys { - store := rs.stores[key] - storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { - continue - } - storeInfos = append(storeInfos, types.StoreInfo{ - Name: key.Name(), - CommitId: store.LastCommitID(), - }) - } - return &types.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } -} - -// RollbackToVersion delete the versions after `target` and update the latest version. -func (rs *Store) RollbackToVersion(target int64) error { - if target <= 0 { - return fmt.Errorf("invalid rollback height target: %d", target) - } - - for key, store := range rs.stores { - if store.GetStoreType() == types.StoreTypeIAVL { - // If the store is wrapped with an inter-block cache, we must first unwrap - // it to get the underlying IAVL store. - store = rs.GetCommitKVStore(key) - err := store.(*iavl.Store).LoadVersionForOverwriting(target) - if err != nil { - return err - } - } - } - - rs.flushMetadata(rs.db, target, rs.buildCommitInfo(target)) - - return rs.LoadLatestVersion() -} - -// SetCommitHeader sets the commit block header of the store. -func (rs *Store) SetCommitHeader(h cmtproto.Header) { - rs.commitHeader = h -} - -// GetCommitInfo attempts to retrieve CommitInfo for a given version/height. It -// will return an error if no CommitInfo exists, we fail to unmarshal the record -// or if we cannot retrieve the object from the DB. 
-func (rs *Store) GetCommitInfo(ver int64) (*types.CommitInfo, error) { - cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver) - - bz, err := rs.db.Get([]byte(cInfoKey)) - if err != nil { - return nil, errorsmod.Wrap(err, "failed to get commit info") - } else if bz == nil { - return nil, errors.New("no commit info found") - } - - cInfo := &types.CommitInfo{} - if err = cInfo.Unmarshal(bz); err != nil { - return nil, errorsmod.Wrap(err, "failed unmarshal commit info") - } - - return cInfo, nil -} - -func (rs *Store) flushMetadata(db dbm.DB, version int64, cInfo *types.CommitInfo) { - rs.logger.Debug("flushing metadata", "height", version) - batch := db.NewBatch() - defer func() { - _ = batch.Close() - }() - - if cInfo != nil { - flushCommitInfo(batch, version, cInfo) - } else { - rs.logger.Debug("commitInfo is nil, not flushed", "height", version) - } - - flushLatestVersion(batch, version) - - if err := batch.WriteSync(); err != nil { - panic(fmt.Errorf("error on batch write %w", err)) - } - rs.logger.Debug("flushing metadata finished", "height", version) -} - -type storeParams struct { - key types.StoreKey - db dbm.DB - typ types.StoreType - initialVersion uint64 -} - -func newStoreParams(key types.StoreKey, db dbm.DB, typ types.StoreType, initialVersion uint64) storeParams { - return storeParams{ - key: key, - db: db, - typ: typ, - initialVersion: initialVersion, - } -} - -func GetLatestVersion(db dbm.DB) int64 { - bz, err := db.Get([]byte(latestVersionKey)) - if err != nil { - panic(err) - } else if bz == nil { - return 0 - } - - var latestVersion int64 - - if err := gogotypes.StdInt64Unmarshal(&latestVersion, bz); err != nil { - panic(err) - } - - return latestVersion -} - -// Commits each store and returns a new commitInfo. -func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { - storeInfos := make([]types.StoreInfo, 0, len(storeMap)) - storeKeys := keysFromStoreKeyMap(storeMap) - - for _, key := range storeKeys { - store := storeMap[key] - last := store.LastCommitID() - - // If a commit event execution is interrupted, a new iavl store's version - // will be larger than the RMS's metadata, when the block is replayed, we - // should avoid committing that iavl store again. 
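GetCommitInfo, flushMetadata, and GetLatestVersion above address the backing DB with the two key shapes declared by latestVersionKey and commitInfoKeyFmt. A sketch of the resulting raw reads (illustrative, error handling elided):

latestBz, _ := db.Get([]byte("s/latest"))             // gogoproto-encoded int64, as read by GetLatestVersion
cInfoBz, _ := db.Get([]byte(fmt.Sprintf("s/%d", 42))) // marshaled types.CommitInfo for height 42
_, _ = latestBz, cInfoBz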
- var commitID types.CommitID - if last.Version >= version { - last.Version = version - commitID = last - } else { - commitID = store.Commit() - } - - storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { - continue - } - - if !removalMap[key] { - si := types.StoreInfo{} - si.Name = key.Name() - si.CommitId = commitID - storeInfos = append(storeInfos, si) - } - } - - sort.SliceStable(storeInfos, func(i, j int) bool { - return strings.Compare(storeInfos[i].Name, storeInfos[j].Name) < 0 - }) - - return &types.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } -} - -func flushCommitInfo(batch dbm.Batch, version int64, cInfo *types.CommitInfo) { - bz, err := cInfo.Marshal() - if err != nil { - panic(err) - } - - cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version) - err = batch.Set([]byte(cInfoKey), bz) - if err != nil { - panic(err) - } -} - -func flushLatestVersion(batch dbm.Batch, version int64) { - bz, err := gogotypes.StdInt64Marshal(version) - if err != nil { - panic(err) - } - - err = batch.Set([]byte(latestVersionKey), bz) - if err != nil { - panic(err) - } -} diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go deleted file mode 100644 index be23d3deda..0000000000 --- a/store/rootmulti/store_test.go +++ /dev/null @@ -1,1034 +0,0 @@ -package rootmulti - -import ( - "bytes" - "crypto/sha256" - "fmt" - "testing" - "time" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/cachemulti" - "cosmossdk.io/store/iavl" - sdkmaps "cosmossdk.io/store/internal/maps" - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -func TestStoreType(t *testing.T) { - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, db) -} - -func TestGetCommitKVStore(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - key := ms.keysByName["store1"] - - store1 := ms.GetCommitKVStore(key) - require.NotNil(t, store1) - require.IsType(t, &iavl.Store{}, store1) - - store2 := ms.GetCommitStore(key) - require.NotNil(t, store2) - require.IsType(t, &iavl.Store{}, store2) -} - -func TestStoreMount(t *testing.T) { - db := dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - - key1 := types.NewKVStoreKey("store1") - key2 := types.NewKVStoreKey("store2") - dup1 := types.NewKVStoreKey("store1") - - require.NotPanics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) - require.NotPanics(t, func() { store.MountStoreWithDB(key2, types.StoreTypeIAVL, db) }) - - require.Panics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) }) - require.Panics(t, func() { store.MountStoreWithDB(nil, types.StoreTypeIAVL, db) }) - require.Panics(t, func() { store.MountStoreWithDB(dup1, types.StoreTypeIAVL, db) }) -} - -func TestCacheMultiStore(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - cacheMulti := ms.CacheMultiStore() - require.IsType(t, cachemulti.Store{}, cacheMulti) -} - -func TestCacheMultiStoreWithVersion(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := 
newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - commitID := types.CommitID{Hash: appHash} - checkStore(t, ms, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - - store1 := ms.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - cID := ms.Commit() - require.Equal(t, int64(1), cID.Version) - - // require failure when given an invalid or pruned version - _, err = ms.CacheMultiStoreWithVersion(cID.Version + 1) - require.Error(t, err) - - // require a valid version can be cache-loaded - cms, err := ms.CacheMultiStoreWithVersion(cID.Version) - require.NoError(t, err) - - // require a valid key lookup yields the correct value - kvStore := cms.GetKVStore(ms.keysByName["store1"]) - require.NotNil(t, kvStore) - require.Equal(t, kvStore.Get(k), v) - - // add new module stores (store4 and store5) to multi stores and commit - key4, key5 := types.NewKVStoreKey("store4"), types.NewKVStoreKey("store5") - ms.MountStoreWithDB(key4, types.StoreTypeIAVL, nil) - ms.MountStoreWithDB(key5, types.StoreTypeIAVL, nil) - err = ms.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{Added: []string{"store4", "store5"}}) - require.NoError(t, err) - ms.Commit() - - // cache multistore of version before adding store4 should work - cms2, err := ms.CacheMultiStoreWithVersion(1) - require.NoError(t, err) - - require.Empty(t, cms2.GetKVStore(key4).Get([]byte("key"))) - - // require we cannot commit (write) to a cache-versioned multi-store - require.Panics(t, func() { - kvStore.Set(k, []byte("newValue")) - cms.Write() - }) -} - -func TestHashStableWithEmptyCommit(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := ms.LoadLatestVersion() - require.Nil(t, err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - commitID := types.CommitID{Hash: appHash} - checkStore(t, ms, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - - store1 := ms.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - workingHash := ms.WorkingHash() - cID := ms.Commit() - require.Equal(t, int64(1), cID.Version) - hash := cID.Hash - require.Equal(t, workingHash, hash) - - // make an empty commit, it should update version, but not affect hash - workingHash = ms.WorkingHash() - cID = ms.Commit() - require.Equal(t, workingHash, cID.Hash) - require.Equal(t, int64(2), cID.Version) - require.Equal(t, hash, cID.Hash) -} - -func TestMultistoreCommitLoad(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - // New store has empty last commit. - commitID := types.CommitID{Hash: appHash} - checkStore(t, store, commitID, commitID) - - // Make sure we can get stores by name. - s1 := store.GetStoreByName("store1") - require.NotNil(t, s1) - s3 := store.GetStoreByName("store3") - require.NotNil(t, s3) - s77 := store.GetStoreByName("store77") - require.Nil(t, s77) - - // Make a few commits and check them.
- nCommits := int64(3) - for i := int64(0); i < nCommits; i++ { - workingHash := store.WorkingHash() - commitID = store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, i+1) - checkStore(t, store, expectedCommitID, commitID) - } - - // Load the latest multistore again and check version. - store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = store.LoadLatestVersion() - require.Nil(t, err) - commitID = getExpectedCommitID(store, nCommits) - checkStore(t, store, commitID, commitID) - - // Commit and check version. - workingHash := store.WorkingHash() - commitID = store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, nCommits+1) - checkStore(t, store, expectedCommitID, commitID) - - // Load an older multistore and check version. - ver := nCommits - 1 - store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = store.LoadVersion(ver) - require.Nil(t, err) - commitID = getExpectedCommitID(store, ver) - checkStore(t, store, commitID, commitID) -} - -func TestMultistoreLoadWithUpgrade(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - // write some data in all stores - k1, v1 := []byte("first"), []byte("store") - s1, _ := store.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, s1) - s1.Set(k1, v1) - - k2, v2 := []byte("second"), []byte("restore") - s2, _ := store.GetStoreByName("store2").(types.KVStore) - require.NotNil(t, s2) - s2.Set(k2, v2) - - k3, v3 := []byte("third"), []byte("dropped") - s3, _ := store.GetStoreByName("store3").(types.KVStore) - require.NotNil(t, s3) - s3.Set(k3, v3) - - s4, _ := store.GetStoreByName("store4").(types.KVStore) - require.Nil(t, s4) - - // do one commit - workingHash := store.WorkingHash() - commitID := store.Commit() - require.Equal(t, workingHash, commitID.Hash) - expectedCommitID := getExpectedCommitID(store, 1) - checkStore(t, store, expectedCommitID, commitID) - - ci, err := store.GetCommitInfo(1) - require.NoError(t, err) - require.Equal(t, int64(1), ci.Version) - require.Equal(t, 3, len(ci.StoreInfos)) - checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"}) - - // Load without changes and make sure it is sensible - store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - err = store.LoadLatestVersion() - require.Nil(t, err) - commitID = getExpectedCommitID(store, 1) - checkStore(t, store, commitID, commitID) - - // let's query data to see it was saved properly - s2, _ = store.GetStoreByName("store2").(types.KVStore) - require.NotNil(t, s2) - require.Equal(t, v2, s2.Get(k2)) - - // now, let's load with upgrades... 
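-	// newMultiStoreWithModifiedMounts (see the utils section below) adds store4,
-	// renames store2 to restore2, and deletes store3.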
- restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = restore.LoadLatestVersionAndUpgrade(upgrades) - require.Nil(t, err) - - // s1 was not changed - s1, _ = restore.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, s1) - require.Equal(t, v1, s1.Get(k1)) - - // store3 is mounted, but data deleted are gone - s3, _ = restore.GetStoreByName("store3").(types.KVStore) - require.NotNil(t, s3) - require.Nil(t, s3.Get(k3)) // data was deleted - - // store4 is mounted, with empty data - s4, _ = restore.GetStoreByName("store4").(types.KVStore) - require.NotNil(t, s4) - - iterator := s4.Iterator(nil, nil) - - values := 0 - for ; iterator.Valid(); iterator.Next() { - values++ - } - require.Zero(t, values) - - require.NoError(t, iterator.Close()) - - // write something inside store4 - k4, v4 := []byte("fourth"), []byte("created") - s4.Set(k4, v4) - - // store2 is no longer mounted - st2 := restore.GetStoreByName("store2") - require.Nil(t, st2) - - // restore2 has the old data - rs2, _ := restore.GetStoreByName("restore2").(types.KVStore) - require.NotNil(t, rs2) - require.Equal(t, v2, rs2.Get(k2)) - - // store this migrated data, and load it again without migrations - migratedID := restore.Commit() - require.Equal(t, migratedID.Version, int64(2)) - - reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - // unmount store3 since store3 was deleted - unmountStore(reload, "store3") - - rs3, _ := reload.GetStoreByName("store3").(types.KVStore) - require.Nil(t, rs3) - - err = reload.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, migratedID, reload.LastCommitID()) - - // query this new store - rl1, _ := reload.GetStoreByName("store1").(types.KVStore) - require.NotNil(t, rl1) - require.Equal(t, v1, rl1.Get(k1)) - - rl2, _ := reload.GetStoreByName("restore2").(types.KVStore) - require.NotNil(t, rl2) - require.Equal(t, v2, rl2.Get(k2)) - - rl4, _ := reload.GetStoreByName("store4").(types.KVStore) - require.NotNil(t, rl4) - require.Equal(t, v4, rl4.Get(k4)) - - // check commitInfo in storage - ci, err = reload.GetCommitInfo(2) - require.NoError(t, err) - require.Equal(t, int64(2), ci.Version) - require.Equal(t, 3, len(ci.StoreInfos), ci.StoreInfos) - checkContains(t, ci.StoreInfos, []string{"store1", "restore2", "store4"}) -} - -func TestParsePath(t *testing.T) { - _, _, err := parsePath("foo") - require.Error(t, err) - - store, subpath, err := parsePath("/foo") - require.NoError(t, err) - require.Equal(t, store, "foo") - require.Equal(t, subpath, "") - - store, subpath, err = parsePath("/fizz/bang/baz") - require.NoError(t, err) - require.Equal(t, store, "fizz") - require.Equal(t, subpath, "/bang/baz") - - substore, subsubpath, err := parsePath(subpath) - require.NoError(t, err) - require.Equal(t, substore, "bang") - require.Equal(t, subsubpath, "/baz") -} - -func TestMultiStoreRestart(t *testing.T) { - db := dbm.NewMemDB() - pruning := pruningtypes.NewCustomPruningOptions(2, 1) - multi := newMultiStoreWithMounts(db, pruning) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - initCid := multi.LastCommitID() - - k, v := "wind", "blows" - k2, v2 := "water", "flows" - k3, v3 := "fire", "burns" - - for i := 1; i < 3; i++ { - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, i))) - - // ... and another. 
- store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, i))) - - // ... and another. - store3 := multi.GetStoreByName("store3").(types.KVStore) - store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, i))) - - multi.Commit() - - cinfo, err := multi.GetCommitInfo(int64(i)) - require.NoError(t, err) - require.Equal(t, int64(i), cinfo.Version) - } - - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, 3))) - - // ... and another. - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, 3))) - - multi.Commit() - - flushedCinfo, err := multi.GetCommitInfo(3) - require.Nil(t, err) - require.NotEqual(t, initCid, flushedCinfo, "CID is different after flush to disk") - - // ... and another. - store3 := multi.GetStoreByName("store3").(types.KVStore) - store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, 3))) - - multi.Commit() - - postFlushCinfo, err := multi.GetCommitInfo(4) - require.NoError(t, err) - require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit") - - multi = newMultiStoreWithMounts(db, pruning) - err = multi.LoadLatestVersion() - require.Nil(t, err) - - reloadedCid := multi.LastCommitID() - require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID") - - // Check that store1 and store2 retained date from 3rd commit - store1 = multi.GetStoreByName("store1").(types.KVStore) - val := store1.Get([]byte(k)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v, 3)), val, "Reloaded value not the same as last flushed value") - - store2 = multi.GetStoreByName("store2").(types.KVStore) - val2 := store2.Get([]byte(k2)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v2, 3)), val2, "Reloaded value not the same as last flushed value") - - // Check that store3 still has data from last commit even though update happened on 2nd commit - store3 = multi.GetStoreByName("store3").(types.KVStore) - val3 := store3.Get([]byte(k3)) - require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value") -} - -func TestMultiStoreQuery(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - k, v := []byte("wind"), []byte("blows") - k2, v2 := []byte("water"), []byte("flows") - // v3 := []byte("is cold") - - // Commit the multistore. - _ = multi.Commit() - - // Make sure we can get by name. - garbage := multi.GetStoreByName("bad-name") - require.Nil(t, garbage) - - // Set and commit data in one store. - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - // ... and another. - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set(k2, v2) - - // Commit the multistore. - cid := multi.Commit() - ver := cid.Version - - // Reload multistore from database - multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err = multi.LoadLatestVersion() - require.Nil(t, err) - - // Test bad path. 
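-	// A request path without a registered store name must surface
-	// ErrUnknownRequest through errors.ABCIInfo.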
- query := types.RequestQuery{Path: "/key", Data: k, Height: ver} - _, err = multi.Query(&query) - codespace, code, _ := errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - query.Path = "h897fy32890rf63296r92" - _, err = multi.Query(&query) - codespace, code, _ = errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - // Test invalid store name. - query.Path = "/garbage/key" - _, err = multi.Query(&query) - codespace, code, _ = errors.ABCIInfo(err, false) - require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code) - require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace) - - // Test valid query with data. - query.Path = "/store1/key" - qres, err := multi.Query(&query) - require.NoError(t, err) - require.Equal(t, v, qres.Value) - - // Test valid but empty query. - query.Path = "/store2/key" - query.Prove = true - qres, err = multi.Query(&query) - require.NoError(t, err) - require.Nil(t, qres.Value) - - // Test store2 data. - // Since we are using the request as a reference, the path will be modified. - query.Data = k2 - query.Path = "/store2/key" - qres, err = multi.Query(&query) - require.NoError(t, err) - require.Equal(t, v2, qres.Value) -} - -func TestMultiStore_Pruning(t *testing.T) { - testCases := []struct { - name string - numVersions int64 - po pruningtypes.PruningOptions - deleted []int64 - saved []int64 - }{ - {"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"prune everything", 12, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7}, []int64{8, 9, 10, 11, 12}}, - {"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 3, 4, 6, 5, 7}, []int64{8, 9, 10}}, - {"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 3, 4, 5, 6}, []int64{7, 8, 9, 10}}, - {"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - db := dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, tc.po) - require.NoError(t, ms.LoadLatestVersion()) - - for i := int64(0); i < tc.numVersions; i++ { - ms.Commit() - } - - for _, v := range tc.deleted { - // Ensure async pruning is done - checkErr := func() bool { - _, err := ms.CacheMultiStoreWithVersion(v) - return err != nil - } - require.Eventually(t, checkErr, 1*time.Second, 10*time.Millisecond, "expected error when loading height: %d", v) - } - - for _, v := range tc.saved { - _, err := ms.CacheMultiStoreWithVersion(v) - require.NoError(t, err, "expected no error when loading height: %d", v) - } - }) - } -} - -func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) { - const ( - numVersions int64 = 10 - keepRecent uint64 = 2 - interval uint64 = 10 - ) - - db := dbm.NewMemDB() - - ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval)) - require.NoError(t, ms.LoadLatestVersion()) - - var lastCommitInfo types.CommitID - for i := int64(0); i < numVersions; i++ { - lastCommitInfo = ms.Commit() - } - - require.Equal(t, numVersions, lastCommitInfo.Version) - - // Get latest - err := ms.LoadVersion(numVersions - 1) - require.NoError(t, err) 
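-	// Reloading an older, still-retained version must leave the manager able to
-	// replay pruning for the same heights without panicking (checked below).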
- - // Ensure already pruned snapshot heights were loaded - require.NoError(t, ms.pruningManager.LoadSnapshotHeights(db)) - - // Test pruning the same heights again - lastCommitInfo = ms.Commit() - require.Equal(t, numVersions, lastCommitInfo.Version) - - // Ensure that can commit one more height with no panic - lastCommitInfo = ms.Commit() - require.Equal(t, numVersions+1, lastCommitInfo.Version) - - isPruned := func() bool { - ls := ms.Commit() // to flush the batch with the pruned heights - for v := int64(1); v < numVersions-int64(keepRecent); v++ { - if err := ms.LoadVersion(v); err == nil { - require.NoError(t, ms.LoadVersion(ls.Version)) // load latest - return false - } - } - return true - } - require.Eventually(t, isPruned, 1000*time.Second, 10*time.Millisecond, "expected error when loading pruned heights") -} - -func TestMultiStore_PruningRestart(t *testing.T) { - db := dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) - require.NoError(t, ms.LoadLatestVersion()) - - // Commit enough to build up heights to prune, where on the next block we should - // batch delete. - for i := int64(0); i < 10; i++ { - ms.Commit() - } - - actualHeightToPrune := ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(0), actualHeightToPrune) - - // "restart" - ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11)) - err := ms.LoadLatestVersion() - require.NoError(t, err) - - actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(0), actualHeightToPrune) - - // commit one more block and ensure the heights have been pruned - ms.Commit() - - actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion()) - require.Equal(t, int64(8), actualHeightToPrune) - - // Ensure async pruning is done - isPruned := func() bool { - ms.Commit() // to flush the batch with the pruned heights - for v := int64(1); v <= actualHeightToPrune; v++ { - if _, err := ms.CacheMultiStoreWithVersion(v); err == nil { - return false - } - } - return true - } - - require.Eventually(t, isPruned, 1*time.Second, 10*time.Millisecond, "expected error when loading pruned heights") -} - -var _ types.PausablePruner = &pauseableCommitKVStoreStub{} - -type pauseableCommitKVStoreStub struct { - types.CommitKVStore - pauseCalled []bool -} - -func (p *pauseableCommitKVStoreStub) PausePruning(b bool) { - p.pauseCalled = append(p.pauseCalled, b) -} - -func TestPausePruningOnCommit(t *testing.T) { - store := NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.SetPruning(pruningtypes.NewCustomPruningOptions(2, 11)) - store.MountStoreWithDB(testStoreKey1, types.StoreTypeIAVL, nil) - require.NoError(t, store.LoadLatestVersion()) - - myStub := &pauseableCommitKVStoreStub{CommitKVStore: store.stores[testStoreKey1]} - store.stores[testStoreKey1] = myStub - // when - store.Commit() - // then - require.Equal(t, []bool{true, false}, myStub.pauseCalled) -} - -// TestUnevenStoresHeightCheck tests if loading root store correctly errors when -// there's any module store with the wrong height -func TestUnevenStoresHeightCheck(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := store.LoadLatestVersion() - require.Nil(t, err) - - // commit to increment store's height - store.Commit() - - // mount store4 to root store - store.MountStoreWithDB(types.NewKVStoreKey("store4"), 
types.StoreTypeIAVL, nil) - - // load the stores without upgrades - err = store.LoadLatestVersion() - require.Error(t, err) - - // now, let's load with upgrades... - upgrades := &types.StoreUpgrades{ - Added: []string{"store4"}, - } - err = store.LoadLatestVersionAndUpgrade(upgrades) - require.Nil(t, err) -} - -func TestSetInitialVersion(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - require.NoError(t, multi.LoadLatestVersion()) - - err := multi.SetInitialVersion(5) - require.NoError(t, err) - require.Equal(t, int64(5), multi.initialVersion) - - multi.Commit() - require.Equal(t, int64(5), multi.LastCommitID().Version) - - ckvs := multi.GetCommitKVStore(multi.keysByName["store1"]) - iavlStore, ok := ckvs.(*iavl.Store) - require.True(t, ok) - require.True(t, iavlStore.VersionExists(5)) -} - -func TestAddListenersAndListeningEnabled(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - testKey := types.NewKVStoreKey("listening_test_key") - enabled := multi.ListeningEnabled(testKey) - require.False(t, enabled) - - wrongTestKey := types.NewKVStoreKey("wrong_listening_test_key") - multi.AddListeners([]types.StoreKey{testKey}) - enabled = multi.ListeningEnabled(wrongTestKey) - require.False(t, enabled) - - enabled = multi.ListeningEnabled(testKey) - require.True(t, enabled) -} - -func TestCacheWraps(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - cacheWrapper := multi.CacheWrap() - require.IsType(t, cachemulti.Store{}, cacheWrapper) - - cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) - require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) -} - -func TestTraceConcurrency(t *testing.T) { - db := dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.NoError(t, err) - - b := &bytes.Buffer{} - key := multi.keysByName["store1"] - tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) - - multi.SetTracer(b) - multi.SetTracingContext(tc) - - cms := multi.CacheMultiStore() - store1 := cms.GetKVStore(key) - cw := store1.CacheWrapWithTrace(b, tc) - _ = cw - require.NotNil(t, store1) - - stop := make(chan struct{}) - stopW := make(chan struct{}) - - go func(stop chan struct{}) { - for { - select { - case <-stop: - return - default: - store1.Set([]byte{1}, []byte{1}) - cms.Write() - } - } - }(stop) - - go func(stop chan struct{}) { - for { - select { - case <-stop: - return - default: - multi.SetTracingContext(tc) - } - } - }(stopW) - - time.Sleep(3 * time.Second) - stop <- struct{}{} - stopW <- struct{}{} -} - -func TestCommitOrdered(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - err := multi.LoadLatestVersion() - require.Nil(t, err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - commitID := types.CommitID{Hash: appHash} - checkStore(t, multi, commitID, commitID) - - k, v := []byte("wind"), []byte("blows") - k2, v2 := []byte("water"), []byte("flows") - k3, v3 := []byte("fire"), []byte("burns") - - store1 := multi.GetStoreByName("store1").(types.KVStore) - store1.Set(k, v) - - store2 := multi.GetStoreByName("store2").(types.KVStore) - store2.Set(k2, v2) - - store3 := 
multi.GetStoreByName("store3").(types.KVStore) - store3.Set(k3, v3) - - typeID := multi.Commit() - require.Equal(t, int64(1), typeID.Version) - - ci, err := multi.GetCommitInfo(1) - require.NoError(t, err) - require.Equal(t, int64(1), ci.Version) - require.Equal(t, 3, len(ci.StoreInfos)) - for i, s := range ci.StoreInfos { - require.Equal(t, s.Name, fmt.Sprintf("store%d", i+1)) - } -} - -//----------------------------------------------------------------------- -// utils - -var ( - testStoreKey1 = types.NewKVStoreKey("store1") - testStoreKey2 = types.NewKVStoreKey("store2") - testStoreKey3 = types.NewKVStoreKey("store3") -) - -func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store { - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.SetPruning(pruningOpts) - - store.MountStoreWithDB(testStoreKey1, types.StoreTypeIAVL, nil) - store.MountStoreWithDB(testStoreKey2, types.StoreTypeIAVL, nil) - store.MountStoreWithDB(testStoreKey3, types.StoreTypeIAVL, nil) - - return store -} - -func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) { - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.SetPruning(pruningOpts) - - store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("restore2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("store3"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil) - - upgrades := &types.StoreUpgrades{ - Added: []string{"store4"}, - Renamed: []types.StoreRename{{ - OldKey: "store2", - NewKey: "restore2", - }}, - Deleted: []string{"store3"}, - } - - return store, upgrades -} - -func unmountStore(rootStore *Store, storeKeyName string) { - sk := rootStore.keysByName[storeKeyName] - delete(rootStore.stores, sk) - delete(rootStore.storesParams, sk) - delete(rootStore.keysByName, storeKeyName) -} - -func checkStore(t *testing.T, store *Store, expect, got types.CommitID) { - t.Helper() - require.Equal(t, expect, got) - require.Equal(t, expect, store.LastCommitID()) -} - -func checkContains(tb testing.TB, info []types.StoreInfo, wanted []string) { - tb.Helper() - - for _, want := range wanted { - checkHas(tb, info, want) - } -} - -func checkHas(tb testing.TB, info []types.StoreInfo, want string) { - tb.Helper() - for _, i := range info { - if i.Name == want { - return - } - } - tb.Fatalf("storeInfo doesn't contain %s", want) -} - -func getExpectedCommitID(store *Store, ver int64) types.CommitID { - return types.CommitID{ - Version: ver, - Hash: hashStores(store.stores), - } -} - -func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte { - m := make(map[string][]byte, len(stores)) - for key, store := range stores { - name := key.Name() - m[name] = types.StoreInfo{ - Name: name, - CommitId: store.LastCommitID(), - }.GetHash() - } - return sdkmaps.HashFromMap(m) -} - -type MockListener struct { - stateCache []types.StoreKVPair -} - -func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error { - tl.stateCache = append(tl.stateCache, types.StoreKVPair{ - StoreKey: storeKey.Name(), - Key: key, - Value: value, - Delete: delete, - }) - return nil -} - -func TestStateListeners(t *testing.T) { - var db dbm.DB = dbm.NewMemDB() - ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - require.Empty(t, 
ms.listeners) - - ms.AddListeners([]types.StoreKey{testStoreKey1}) - require.Equal(t, 1, len(ms.listeners)) - - require.NoError(t, ms.LoadLatestVersion()) - cacheMulti := ms.CacheMultiStore() - - store := cacheMulti.GetKVStore(testStoreKey1) - store.Set([]byte{1}, []byte{1}) - require.Empty(t, ms.PopStateCache()) - - // writes are observed when cache store commit. - cacheMulti.Write() - require.Equal(t, 1, len(ms.PopStateCache())) - - // test no listening on unobserved store - store = cacheMulti.GetKVStore(testStoreKey2) - store.Set([]byte{1}, []byte{1}) - require.Empty(t, ms.PopStateCache()) - - // writes are not observed when cache store commit - cacheMulti.Write() - require.Empty(t, ms.PopStateCache()) -} - -type commitKVStoreStub struct { - types.CommitKVStore - Committed int -} - -func (stub *commitKVStoreStub) Commit() types.CommitID { - commitID := stub.CommitKVStore.Commit() - stub.Committed++ - return commitID -} - -func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) { - var db dbm.DB = dbm.NewMemDB() - store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) - store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) - if err := store.LoadLatestVersion(); err != nil { - return nil, err - } - return map[types.StoreKey]types.CommitKVStore{ - testStoreKey1: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore), - }, - testStoreKey2: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore), - }, - testStoreKey3: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore), - }, - }, nil -} - -func TestCommitStores(t *testing.T) { - testCases := []struct { - name string - committed int - exptectCommit int - }{ - { - "when upgrade not get interrupted", - 0, - 1, - }, - { - "when upgrade get interrupted once", - 1, - 0, - }, - { - "when upgrade get interrupted twice", - 2, - 0, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - storeMap, err := prepareStoreMap() - require.NoError(t, err) - store := storeMap[testStoreKey1].(*commitKVStoreStub) - for i := tc.committed; i > 0; i-- { - store.Commit() - } - store.Committed = 0 - var version int64 = 1 - removalMap := map[types.StoreKey]bool{} - res := commitStores(version, storeMap, removalMap) - for _, s := range res.StoreInfos { - require.Equal(t, version, s.CommitId.Version) - } - require.Equal(t, version, res.Version) - require.Equal(t, tc.exptectCommit, store.Committed) - }) - } -} diff --git a/store/snapshots/README.md b/store/snapshots/README.md deleted file mode 100644 index 6de7232468..0000000000 --- a/store/snapshots/README.md +++ /dev/null @@ -1,284 +0,0 @@ -# State Sync Snapshotting - -The `snapshots` package implements automatic support for CometBFT state sync -in Cosmos SDK-based applications. State sync allows a new node joining a network -to simply fetch a recent snapshot of the application state instead of fetching -and applying all historical blocks. This can reduce the time needed to join the -network by several orders of magnitude (e.g. weeks to minutes), but the node -will not contain historical data from previous heights. 
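-
-Concretely, the pieces described in this document wire together roughly as
-follows. This is a minimal sketch, not production wiring: the store path and
-option values are illustrative, and `NewSnapshotOptions` is assumed to be the
-options constructor (a `types.SnapshotOptions` literal works as well).
-
-```go
-package main
-
-import (
-	db "github.com/cosmos/cosmos-db"
-
-	"cosmossdk.io/log"
-	"cosmossdk.io/store/snapshots"
-	snapshottypes "cosmossdk.io/store/snapshots/types"
-)
-
-func main() {
-	// The snapshot store keeps metadata in a small DB and chunk files on disk.
-	store, err := snapshots.NewStore(db.NewMemDB(), "/data/snapshots")
-	if err != nil {
-		panic(err)
-	}
-
-	// Snapshot every 1000 heights; keep the two most recent snapshots.
-	opts := snapshottypes.NewSnapshotOptions(1000, 2)
-
-	// In a real app, multistore is the rootmulti.Store; nil extensions means
-	// no extension snapshotters are registered yet.
-	var multistore snapshottypes.Snapshotter
-	mgr := snapshots.NewManager(store, opts, multistore, nil, log.NewNopLogger())
-	_ = mgr
-}
-```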
-
-This document describes the Cosmos SDK implementation of the ABCI state sync
-interface; for more information on CometBFT state sync in general, see:
-
-* [CometBFT State Sync for Developers](https://medium.com/cometbft/cometbft-core-state-sync-for-developers-70a96ba3ee35)
-* [ABCI State Sync Spec](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
-* [ABCI State Sync Method/Type Reference](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
-
-## Overview
-
-For an overview of how Cosmos SDK state sync is set up and configured by
-developers and end-users, see the
-[Cosmos SDK State Sync Guide](https://blog.cosmos.network/cosmos-sdk-state-sync-guide-99e4cf43be2f).
-
-Briefly, the Cosmos SDK takes state snapshots at regular height intervals given
-by `state-sync.snapshot-interval` and stores them as binary files in the
-filesystem under `/data/snapshots/`, with metadata in a LevelDB database
-`/data/snapshots/metadata.db`. The number of recent snapshots to keep is given
-by `state-sync.snapshot-keep-recent`.
-
-Snapshots are taken asynchronously, i.e. new blocks will be applied concurrently
-with snapshots being taken. This is possible because IAVL supports querying
-immutable historical heights. However, it requires heights that are multiples of
-`state-sync.snapshot-interval` to be kept until after the snapshot is complete,
-to prevent a height from being removed while it is still being snapshotted.
-
-When a remote node is state syncing, CometBFT calls the ABCI method
-`ListSnapshots` to list available local snapshots and `LoadSnapshotChunk` to
-load a binary snapshot chunk. When the local node is being state synced,
-CometBFT calls `OfferSnapshot` to offer a discovered remote snapshot to the
-local application and `ApplySnapshotChunk` to apply a binary snapshot chunk to
-the local application. See the resources linked above for more details on these
-methods and how CometBFT performs state sync.
-
-The Cosmos SDK does not currently do any incremental verification of snapshots
-during restoration, i.e. only after the entire snapshot has been restored will
-CometBFT compare the app hash against the trusted hash from the chain. Cosmos
-SDK snapshots and chunks do contain hashes as checksums to guard against IO
-corruption and non-determinism, but these are not tied to the chain state and
-can be trivially forged by an adversary. This was considered out of scope for
-the initial implementation, but can be added later without changes to the
-ABCI state sync protocol.
-
-## Relationship to Pruning
-
-Snapshot settings are optional. However, if set, they affect how pruning is done
-by persisting the heights that are multiples of `state-sync.snapshot-interval`
-until after the snapshot is complete.
-
-If pruning is enabled (not `pruning = "nothing"`), we avoid pruning heights that
-are multiples of `state-sync.snapshot-interval` in the regular logic determined
-by the pruning settings and applied after every `Commit()`. This is done to
-prevent a height from being removed before a snapshot is complete. Therefore,
-we keep such heights until after a snapshot is done. At this point, the height
-is sent to the `pruning.Manager` to be pruned according to the pruning settings
-after the next `Commit()`.
-
-To illustrate, assume that we are currently at height 960 with
-`pruning-keep-recent = 50`, `pruning-interval = 10`, and
-`state-sync.snapshot-interval = 100`. Let's assume that the snapshot that was
-triggered at height `900` **just finishes**.
-Then, we can prune height `900` right away (that is, when we call `Commit()`
-at height 960), because 900 is less than `960 - 50 = 910`.
-
-Let's now assume that all conditions stay the same but the snapshot at height 900 is **not complete yet**.
-Then, we cannot prune it, to avoid deleting a height that is still being snapshotted. Therefore, we keep
-track of this height until the snapshot is complete. The height 900 will be pruned at the first height h
-that satisfies the following conditions:
-
-* the snapshot is complete
-* h is a multiple of `pruning-interval`
-* the snapshot height is less than h - `pruning-keep-recent`
-
-Note that in both examples, if we let the current height be C and the previous height be P = C - 1, then every height h with
-
-P - `pruning-keep-recent` - `pruning-interval` <= h <= P - `pruning-keep-recent`
-
-can be pruned. In our first example, all heights 899-909 fall in this range and are pruned at height 960,
-as long as h is not a snapshot height (e.g. 900).
-
-That is, we always use the current height (960) to determine at which height to prune, while we use the
-previous height to determine which heights are to be pruned (heights `959 - 50 - 10 = 899` through
-`959 - 50 = 909`).
-
-## Configuration
-
-* `state-sync.snapshot-interval`
-    * the interval at which to take snapshots.
-    * the value of 0 disables snapshots.
-    * if pruning is enabled, it is done after a snapshot is complete for the heights that are multiples of this interval.
-
-* `state-sync.snapshot-keep-recent`
-    * the number of recent snapshots to keep.
-    * 0 means keep all.
-
-## Snapshot Metadata
-
-The ABCI Protobuf type for a snapshot is listed below (refer to the ABCI spec
-for field details):
-
-```protobuf
-message Snapshot {
-  uint64 height = 1;   // The height at which the snapshot was taken
-  uint32 format = 2;   // The application-specific snapshot format
-  uint32 chunks = 3;   // Number of chunks in the snapshot
-  bytes  hash = 4;     // Arbitrary snapshot hash, equal only if identical
-  bytes  metadata = 5; // Arbitrary application metadata
-}
-```
-
-Because the `metadata` field is application-specific, the Cosmos SDK uses a
-similar type `cosmos.base.snapshots.v1beta1.Snapshot` with its own metadata
-representation:
-
-```protobuf
-// Snapshot contains CometBFT state sync snapshot info.
-message Snapshot {
-  uint64   height = 1;
-  uint32   format = 2;
-  uint32   chunks = 3;
-  bytes    hash = 4;
-  Metadata metadata = 5 [(gogoproto.nullable) = false];
-}
-
-// Metadata contains SDK-specific snapshot metadata.
-message Metadata {
-  repeated bytes chunk_hashes = 1; // SHA-256 chunk hashes
-}
-```
-
-The `format` is currently `1`, defined in `snapshots.types.CurrentFormat`. This
-must be increased whenever the binary snapshot format changes, and it may be
-useful to support past formats in newer versions.
-
-The `hash` is a SHA-256 hash of the entire binary snapshot, used to guard
-against IO corruption and non-determinism across nodes. Note that this is not
-tied to the chain state, and can be trivially forged (but CometBFT will always
-compare the final app hash against the chain app hash). Similarly, the
-`chunk_hashes` are SHA-256 checksums of each binary chunk.
-
-The `metadata` field is Protobuf-serialized before it is placed into the ABCI
-snapshot.
-
-## Snapshot Format
-
-The current version `1` snapshot format is a zlib-compressed, length-prefixed
-Protobuf stream of `cosmos.base.store.v1beta1.SnapshotItem` messages, split into
-chunks at exact 10 MB boundaries.
- -```protobuf -// SnapshotItem is an item contained in a rootmulti.Store snapshot. -message SnapshotItem { - // item is the specific type of snapshot item. - oneof item { - SnapshotStoreItem store = 1; - SnapshotIAVLItem iavl = 2 [(gogoproto.customname) = "IAVL"]; - } -} - -// SnapshotStoreItem contains metadata about a snapshotted store. -message SnapshotStoreItem { - string name = 1; -} - -// SnapshotIAVLItem is an exported IAVL node. -message SnapshotIAVLItem { - bytes key = 1; - bytes value = 2; - int64 version = 3; - int32 height = 4; -} -``` - -Snapshots are generated by `rootmulti.Store.Snapshot()` as follows: - -1. Set up a `protoio.NewDelimitedWriter` that writes length-prefixed serialized - `SnapshotItem` Protobuf messages. - 1. Iterate over each IAVL store in lexicographical order by store name. - 2. Emit a `SnapshotStoreItem` containing the store name. - 3. Start an IAVL export for the store using - [`iavl.ImmutableTree.Export()`](https://pkg.go.dev/github.com/cosmos/iavl#ImmutableTree.Export). - 4. Iterate over each IAVL node. - 5. Emit a `SnapshotIAVLItem` for the IAVL node. -2. Pass the serialized Protobuf output stream to a zlib compression writer. -3. Split the zlib output stream into chunks at exactly every 10th megabyte. - -Snapshots are restored via `rootmulti.Store.Restore()` as the inverse of the above, using -[`iavl.MutableTree.Import()`](https://pkg.go.dev/github.com/cosmos/iavl#MutableTree.Import) -to reconstruct each IAVL tree. - -## Snapshot Storage - -Snapshot storage is managed by `snapshots.Store`, with metadata in a `db.DB` -database and binary chunks in the filesystem. Note that this is only used to -store locally taken snapshots that are being offered to other nodes. When the -local node is being state synced, CometBFT will take care of buffering and -storing incoming snapshot chunks before they are applied to the application. - -Metadata is generally stored in a LevelDB database at -`/data/snapshots/metadata.db`. It contains serialized -`cosmos.base.snapshots.v1beta1.Snapshot` Protobuf messages with a key given by -the concatenation of a key prefix, the big-endian height, and the big-endian -format. Chunk data is stored as regular files under -`/data/snapshots///`. - -The `snapshots.Store` API is based on streaming IO, and integrates easily with -the `snapshots.types.Snapshotter` snapshot/restore interface implemented by -`rootmulti.Store`. The `Store.Save()` method stores a snapshot given as a -`<- chan io.ReadCloser` channel of binary chunk streams, and `Store.Load()` loads -the snapshot as a channel of binary chunk streams -- the same stream types used -by `Snapshotter.Snapshot()` and `Snapshotter.Restore()` to take and restore -snapshots using streaming IO. - -The store also provides many other methods such as `List()` to list stored -snapshots, `LoadChunk()` to load a single snapshot chunk, and `Prune()` to prune -old snapshots. - -## Taking Snapshots - -`snapshots.Manager` is a high-level snapshot manager that integrates a -`snapshots.types.Snapshotter` (i.e. the `rootmulti.Store` snapshot -functionality) and a `snapshots.Store`, providing an API that maps easily onto -the ABCI state sync API. The `Manager` will also make sure only one operation -is in progress at a time, e.g. to prevent multiple snapshots being taken -concurrently. - -During `BaseApp.Commit`, once a state transition has been committed, the height -is checked against the `state-sync.snapshot-interval` setting. 
If the committed -height should be snapshotted, a goroutine `BaseApp.snapshot()` is spawned that -calls `snapshots.Manager.Create()` to create the snapshot. Once a snapshot is -complete and if pruning is enabled, the snapshot height is pruned away by the manager -with the call `PruneSnapshotHeight(...)` to the `snapshots.types.Snapshotter`. - -`Manager.Create()` will do some basic pre-flight checks, and then start -generating a snapshot by calling `rootmulti.Store.Snapshot()`. The chunk stream -is passed into `snapshots.Store.Save()`, which stores the chunks in the -filesystem and records the snapshot metadata in the snapshot database. - -Once the snapshot has been generated, `BaseApp.snapshot()` then removes any -old snapshots based on the `state-sync.snapshot-keep-recent` setting. - -## Serving Snapshots - -When a remote node is discovering snapshots for state sync, CometBFT will -call the `ListSnapshots` ABCI method to list the snapshots present on the -local node. This is dispatched to `snapshots.Manager.List()`, which in turn -dispatches to `snapshots.Store.List()`. - -When a remote node is fetching snapshot chunks during state sync, CometBFT -will call the `LoadSnapshotChunk` ABCI method to fetch a chunk from the local -node. This dispatches to `snapshots.Manager.LoadChunk()`, which in turn -dispatches to `snapshots.Store.LoadChunk()`. - -## Restoring Snapshots - -When the operator has configured the local CometBFT node to run state sync -(see the resources listed in the introduction for details on CometBFT state -sync), it will discover snapshots across the P2P network and offer their -metadata in turn to the local application via the `OfferSnapshot` ABCI call. - -`BaseApp.OfferSnapshot()` attempts to start a restore operation by calling -`snapshots.Manager.Restore()`. This may fail, e.g. if the snapshot format is -unknown (it may have been generated by a different version of the Cosmos SDK), -in which case CometBFT will offer other discovered snapshots. - -If the snapshot is accepted, `Manager.Restore()` will record that a restore -operation is in progress, and spawn a separate goroutine that runs a synchronous -`rootmulti.Store.Restore()` snapshot restoration which will be fed snapshot -chunks until it is complete. - -CometBFT will then start fetching and buffering chunks, providing them in -order via ABCI `ApplySnapshotChunk` calls. These dispatch to -`Manager.RestoreChunk()`, which passes the chunks to the ongoing restore -process, checking if errors have been encountered yet (e.g. due to checksum -mismatches or invalid IAVL data). Once the final chunk is passed, -`Manager.RestoreChunk()` will wait for the restore process to complete before -returning. - -Once the restore is completed, CometBFT will go on to call the `Info` ABCI -call to fetch the app hash, and compare this against the trusted chain app -hash at the snapshot height to verify the restored state. If it matches, -CometBFT goes on to process blocks. diff --git a/store/snapshots/chunk.go b/store/snapshots/chunk.go deleted file mode 100644 index 3a4981b728..0000000000 --- a/store/snapshots/chunk.go +++ /dev/null @@ -1,185 +0,0 @@ -package snapshots - -import ( - "io" - "math" - - "cosmossdk.io/errors" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" -) - -// ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a -// sequence of io.ReadClosers via a channel. 
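-//
-// A sketch of the producer side (the chunk size and src reader are illustrative):
-//
-//	ch := make(chan io.ReadCloser, 4)
-//	go func() {
-//		cw := NewChunkWriter(ch, uint64(10e6)) // split at 10 MB
-//		defer cw.Close()
-//		if _, err := io.Copy(cw, src); err != nil {
-//			cw.CloseWithError(err) // Close afterwards is a no-op
-//		}
-//	}()
-//	for chunk := range ch {
-//		_ = chunk.Close() // consume, then close each chunk
-//	}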
-type ChunkWriter struct { - ch chan<- io.ReadCloser - pipe *io.PipeWriter - chunkSize uint64 - written uint64 - closed bool -} - -// NewChunkWriter creates a new ChunkWriter. If chunkSize is 0, no chunking will be done. -func NewChunkWriter(ch chan<- io.ReadCloser, chunkSize uint64) *ChunkWriter { - return &ChunkWriter{ - ch: ch, - chunkSize: chunkSize, - } -} - -// chunk creates a new chunk. -func (w *ChunkWriter) chunk() error { - if w.pipe != nil { - err := w.pipe.Close() - if err != nil { - return err - } - } - pr, pw := io.Pipe() - w.ch <- pr - w.pipe = pw - w.written = 0 - return nil -} - -// Close implements io.Closer. -func (w *ChunkWriter) Close() error { - if !w.closed { - w.closed = true - close(w.ch) - var err error - if w.pipe != nil { - err = w.pipe.Close() - } - return err - } - return nil -} - -// CloseWithError closes the writer and sends an error to the reader. -func (w *ChunkWriter) CloseWithError(err error) { - if !w.closed { - if w.pipe == nil { - // create a dummy pipe just to propagate the error to the reader, it always returns nil - _ = w.chunk() - } - w.closed = true - close(w.ch) - _ = w.pipe.CloseWithError(err) // CloseWithError always returns nil - } -} - -// Write implements io.Writer. -func (w *ChunkWriter) Write(data []byte) (int, error) { - if w.closed { - return 0, errors.Wrap(storetypes.ErrLogic, "cannot write to closed ChunkWriter") - } - nTotal := 0 - for len(data) > 0 { - if w.pipe == nil || (w.written >= w.chunkSize && w.chunkSize > 0) { - err := w.chunk() - if err != nil { - return nTotal, err - } - } - - var writeSize uint64 - if w.chunkSize == 0 { - writeSize = uint64(len(data)) - } else { - writeSize = w.chunkSize - w.written - } - if writeSize > uint64(len(data)) { - writeSize = uint64(len(data)) - } - - n, err := w.pipe.Write(data[:writeSize]) - w.written += uint64(n) - nTotal += n - if err != nil { - return nTotal, err - } - data = data[writeSize:] - } - return nTotal, nil -} - -// ChunkReader reads chunks from a channel of io.ReadClosers and outputs them as an io.Reader -type ChunkReader struct { - ch <-chan io.ReadCloser - reader io.ReadCloser -} - -// NewChunkReader creates a new ChunkReader. -func NewChunkReader(ch <-chan io.ReadCloser) *ChunkReader { - return &ChunkReader{ch: ch} -} - -// next fetches the next chunk from the channel, or returns io.EOF if there are no more chunks. -func (r *ChunkReader) next() error { - reader, ok := <-r.ch - if !ok { - return io.EOF - } - r.reader = reader - return nil -} - -// Close implements io.ReadCloser. -func (r *ChunkReader) Close() error { - var err error - if r.reader != nil { - err = r.reader.Close() - r.reader = nil - } - for reader := range r.ch { - if e := reader.Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// Read implements io.Reader. -func (r *ChunkReader) Read(p []byte) (int, error) { - if r.reader == nil { - err := r.next() - if err != nil { - return 0, err - } - } - n, err := r.reader.Read(p) - if errors.IsOf(err, io.EOF) { - err = r.reader.Close() - r.reader = nil - if err != nil { - return 0, err - } - return r.Read(p) - } - return n, err -} - -// DrainChunks drains and closes all remaining chunks from a chunk channel. 
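-// It is typically used to abandon the rest of a snapshot stream after an error,
-// so the producing goroutine does not stay blocked on the channel.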
-func DrainChunks(chunks <-chan io.ReadCloser) { - for chunk := range chunks { - _ = chunk.Close() - } -} - -// ValidRestoreHeight will check height is valid for snapshot restore or not -func ValidRestoreHeight(format uint32, height uint64) error { - if format != snapshottypes.CurrentFormat { - return errors.Wrapf(snapshottypes.ErrUnknownFormat, "format %v", format) - } - - if height == 0 { - return errors.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0") - } - if height > uint64(math.MaxInt64) { - return errors.Wrapf(snapshottypes.ErrInvalidMetadata, - "snapshot height %v cannot exceed %v", height, int64(math.MaxInt64)) - } - - return nil -} diff --git a/store/snapshots/chunk_test.go b/store/snapshots/chunk_test.go deleted file mode 100644 index df524cdf3c..0000000000 --- a/store/snapshots/chunk_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package snapshots_test - -import ( - "bytes" - "errors" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/snapshots" -) - -func TestChunkWriter(t *testing.T) { - ch := make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 2) - - n, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - n, err = chunkWriter.Write([]byte{4, 5, 6}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - n, err = chunkWriter.Write([]byte{7, 8, 9}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - err = chunkWriter.Close() - require.NoError(t, err) - - // closed writer should error - _, err = chunkWriter.Write([]byte{10}) - require.Error(t, err) - - // closing again should be fine - err = chunkWriter.Close() - require.NoError(t, err) - }() - - assert.Equal(t, [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9}}, readChunks(ch)) - - // 0-sized chunks should return the whole body as one chunk - ch = make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 0) - _, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - _, err = chunkWriter.Write([]byte{4, 5, 6}) - require.NoError(t, err) - err = chunkWriter.Close() - require.NoError(t, err) - }() - assert.Equal(t, [][]byte{{1, 2, 3, 4, 5, 6}}, readChunks(ch)) - - // closing with error should return the error - theErr := errors.New("boom") - ch = make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 2) - _, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - chunkWriter.CloseWithError(theErr) - }() - chunk, err := io.ReadAll(<-ch) - require.NoError(t, err) - assert.Equal(t, []byte{1, 2}, chunk) - _, err = io.ReadAll(<-ch) - require.Error(t, err) - assert.Equal(t, theErr, err) - assert.Empty(t, ch) - - // closing immediately should return no chunks - ch = make(chan io.ReadCloser, 100) - chunkWriter := snapshots.NewChunkWriter(ch, 2) - err = chunkWriter.Close() - require.NoError(t, err) - assert.Empty(t, ch) -} - -func TestChunkReader(t *testing.T) { - ch := makeChunks([][]byte{ - {1, 2, 3}, - {4}, - {}, - {5, 6}, - }) - chunkReader := snapshots.NewChunkReader(ch) - - buf := []byte{0, 0, 0, 0} - n, err := chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, 3, n) - assert.Equal(t, []byte{1, 2, 3, 0}, buf) - - buf = []byte{0, 0, 0, 0} - n, err = chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, 1, n) - assert.Equal(t, []byte{4, 0, 0, 0}, buf) - - buf = []byte{0, 0, 0, 0} - n, err = chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, 2, 
n) - assert.Equal(t, []byte{5, 6, 0, 0}, buf) - - buf = []byte{0, 0, 0, 0} - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, io.EOF, err) - - err = chunkReader.Close() - require.NoError(t, err) - - err = chunkReader.Close() // closing twice should be fine - require.NoError(t, err) - - // Empty channel should be fine - ch = makeChunks(nil) - chunkReader = snapshots.NewChunkReader(ch) - buf = make([]byte, 4) - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, io.EOF, err) - - // Using a pipe that closes with an error should return the error - theErr := errors.New("boom") - pr, pw := io.Pipe() - pch := make(chan io.ReadCloser, 1) - pch <- pr - _ = pw.CloseWithError(theErr) - - chunkReader = snapshots.NewChunkReader(pch) - buf = make([]byte, 4) - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, theErr, err) - - // Closing the reader should close the writer - pr, pw = io.Pipe() - pch = make(chan io.ReadCloser, 2) - pch <- io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3})) - pch <- pr - close(pch) - - go func() { - chunkReader := snapshots.NewChunkReader(pch) - buf := []byte{0, 0, 0, 0} - _, err := chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, []byte{1, 2, 3, 0}, buf) - - err = chunkReader.Close() - require.NoError(t, err) - }() - - _, err = pw.Write([]byte{9, 9, 9}) - require.Error(t, err) - assert.Equal(t, err, io.ErrClosedPipe) -} diff --git a/store/snapshots/helpers_test.go b/store/snapshots/helpers_test.go deleted file mode 100644 index af623b0256..0000000000 --- a/store/snapshots/helpers_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package snapshots_test - -import ( - "bufio" - "bytes" - "compress/zlib" - "crypto/sha256" - "errors" - "io" - "os" - "testing" - "time" - - db "github.com/cosmos/cosmos-db" - protoio "github.com/cosmos/gogoproto/io" - "github.com/stretchr/testify/require" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - "cosmossdk.io/store/types" -) - -func checksums(slice [][]byte) [][]byte { - hasher := sha256.New() - checksums := make([][]byte, len(slice)) - for i, chunk := range slice { - hasher.Write(chunk) - checksums[i] = hasher.Sum(nil) - hasher.Reset() - } - return checksums -} - -func hash(chunks [][]byte) []byte { - hasher := sha256.New() - for _, chunk := range chunks { - hasher.Write(chunk) - } - return hasher.Sum(nil) -} - -func makeChunks(chunks [][]byte) <-chan io.ReadCloser { - ch := make(chan io.ReadCloser, len(chunks)) - for _, chunk := range chunks { - ch <- io.NopCloser(bytes.NewReader(chunk)) - } - close(ch) - return ch -} - -func readChunks(chunks <-chan io.ReadCloser) [][]byte { - bodies := [][]byte{} - for chunk := range chunks { - body, err := io.ReadAll(chunk) - if err != nil { - panic(err) - } - bodies = append(bodies, body) - } - return bodies -} - -// snapshotItems serialize a array of bytes as SnapshotItem_ExtensionPayload, and return the chunks. 
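-// The pipeline below (chunk writer -> buffered writer -> zlib -> length-prefixed
-// protobuf) mirrors the production snapshot writer, with the same parameters.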
-func snapshotItems(items [][]byte, ext snapshottypes.ExtensionSnapshotter) [][]byte { - // copy the same parameters from the code - snapshotChunkSize := uint64(10e6) - snapshotBufferSize := int(snapshotChunkSize) - - ch := make(chan io.ReadCloser) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, snapshotChunkSize) - bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize) - zWriter, _ := zlib.NewWriterLevel(bufWriter, 7) - protoWriter := protoio.NewDelimitedWriter(zWriter) - for _, item := range items { - _ = snapshottypes.WriteExtensionPayload(protoWriter, item) - } - // write extension metadata - _ = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{ - Item: &snapshottypes.SnapshotItem_Extension{ - Extension: &snapshottypes.SnapshotExtensionMeta{ - Name: ext.SnapshotName(), - Format: ext.SnapshotFormat(), - }, - }, - }) - _ = ext.SnapshotExtension(0, func(payload []byte) error { - return snapshottypes.WriteExtensionPayload(protoWriter, payload) - }) - _ = protoWriter.Close() - _ = bufWriter.Flush() - _ = chunkWriter.Close() - }() - - var chunks [][]byte - for chunkBody := range ch { - chunk, err := io.ReadAll(chunkBody) - if err != nil { - panic(err) - } - chunks = append(chunks, chunk) - } - - return chunks -} - -type mockSnapshotter struct { - items [][]byte - prunedHeights map[int64]struct{} - snapshotInterval uint64 -} - -func (m *mockSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshottypes.SnapshotItem, error) { - if format == 0 { - return snapshottypes.SnapshotItem{}, snapshottypes.ErrUnknownFormat - } - if m.items != nil { - return snapshottypes.SnapshotItem{}, errors.New("already has contents") - } - - var item snapshottypes.SnapshotItem - m.items = [][]byte{} - for { - item.Reset() - err := protoReader.ReadMsg(&item) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") - } - payload := item.GetExtensionPayload() - if payload == nil { - break - } - m.items = append(m.items, payload.Payload) - } - - return item, nil -} - -func (m *mockSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - for _, item := range m.items { - if err := snapshottypes.WriteExtensionPayload(protoWriter, item); err != nil { - return err - } - } - return nil -} - -func (m *mockSnapshotter) SnapshotFormat() uint32 { - return snapshottypes.CurrentFormat -} - -func (m *mockSnapshotter) SupportedFormats() []uint32 { - return []uint32{snapshottypes.CurrentFormat} -} - -func (m *mockSnapshotter) PruneSnapshotHeight(height int64) { - m.prunedHeights[height] = struct{}{} -} - -func (m *mockSnapshotter) GetSnapshotInterval() uint64 { - return m.snapshotInterval -} - -func (m *mockSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { - m.snapshotInterval = snapshotInterval -} - -type mockErrorSnapshotter struct{} - -var _ snapshottypes.Snapshotter = (*mockErrorSnapshotter)(nil) - -func (m *mockErrorSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - return errors.New("mock snapshot error") -} - -func (m *mockErrorSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshottypes.SnapshotItem, error) { - return snapshottypes.SnapshotItem{}, errors.New("mock restore error") -} - -func (m *mockErrorSnapshotter) SnapshotFormat() uint32 { - return snapshottypes.CurrentFormat -} - -func (m *mockErrorSnapshotter) SupportedFormats() []uint32 { - return 
[]uint32{snapshottypes.CurrentFormat} -} - -func (m *mockErrorSnapshotter) PruneSnapshotHeight(height int64) { -} - -func (m *mockErrorSnapshotter) GetSnapshotInterval() uint64 { - return 0 -} - -func (m *mockErrorSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { -} - -// setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. -// The snapshot will complete when the returned closer is called. -func setupBusyManager(t *testing.T) *snapshots.Manager { - t.Helper() - store, err := snapshots.NewStore(db.NewMemDB(), t.TempDir()) - require.NoError(t, err) - hung := newHungSnapshotter() - hung.SetSnapshotInterval(opts.Interval) - mgr := snapshots.NewManager(store, opts, hung, nil, log.NewNopLogger()) - require.Equal(t, opts.Interval, hung.snapshotInterval) - - // Channel to ensure the test doesn't finish until the goroutine is done. - // Without this, there are intermittent test failures about - // the t.TempDir() cleanup failing due to the directory not being empty. - done := make(chan struct{}) - - go func() { - defer close(done) - _, err := mgr.Create(1) - require.NoError(t, err) - _, didPruneHeight := hung.prunedHeights[1] - require.True(t, didPruneHeight) - }() - time.Sleep(10 * time.Millisecond) - - t.Cleanup(func() { - <-done - }) - - t.Cleanup(hung.Close) - - return mgr -} - -// hungSnapshotter can be used to test operations in progress. Call close to end the snapshot. -type hungSnapshotter struct { - ch chan struct{} - prunedHeights map[int64]struct{} - snapshotInterval uint64 -} - -func newHungSnapshotter() *hungSnapshotter { - return &hungSnapshotter{ - ch: make(chan struct{}), - prunedHeights: make(map[int64]struct{}), - } -} - -func (m *hungSnapshotter) Close() { - close(m.ch) -} - -func (m *hungSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - <-m.ch - return nil -} - -func (m *hungSnapshotter) PruneSnapshotHeight(height int64) { - m.prunedHeights[height] = struct{}{} -} - -func (m *hungSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { - m.snapshotInterval = snapshotInterval -} - -func (m *hungSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, -) (snapshottypes.SnapshotItem, error) { - panic("not implemented") -} - -type extSnapshotter struct { - state []uint64 -} - -func newExtSnapshotter(count int) *extSnapshotter { - state := make([]uint64, 0, count) - for i := 0; i < count; i++ { - state = append(state, uint64(i)) - } - return &extSnapshotter{ - state, - } -} - -func (s *extSnapshotter) SnapshotName() string { - return "mock" -} - -func (s *extSnapshotter) SnapshotFormat() uint32 { - return 1 -} - -func (s *extSnapshotter) SupportedFormats() []uint32 { - return []uint32{1} -} - -func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error { - for _, i := range s.state { - if err := payloadWriter(types.Uint64ToBigEndian(i)); err != nil { - return err - } - } - return nil -} - -func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshottypes.ExtensionPayloadReader) error { - for { - payload, err := payloadReader() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return err - } - s.state = append(s.state, types.BigEndianToUint64(payload)) - } - // finalize restoration - return nil -} - -// GetTempDir returns a writable temporary director for the test to use. 
-func GetTempDir(tb testing.TB) string {
-	tb.Helper()
-	// os.MkDir() is used instead of testing.T.TempDir()
-	// see https://github.com/cosmos/cosmos-sdk/pull/8475 and
-	// https://github.com/cosmos/cosmos-sdk/pull/10341 for
-	// this change's rationale.
-	tempdir, err := os.MkdirTemp("", "")
-	require.NoError(tb, err)
-	tb.Cleanup(func() { _ = os.RemoveAll(tempdir) })
-	return tempdir
-}
diff --git a/store/snapshots/manager.go b/store/snapshots/manager.go
deleted file mode 100644
index 4d75690c39..0000000000
--- a/store/snapshots/manager.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package snapshots
-
-import (
-	"bytes"
-	"crypto/sha256"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"os"
-	"sort"
-	"sync"
-
-	errorsmod "cosmossdk.io/errors"
-	"cosmossdk.io/store/snapshots/types"
-	storetypes "cosmossdk.io/store/types"
-)
-
-// Manager manages snapshot and restore operations for an app, making sure only a single
-// long-running operation is in progress at any given time, and providing convenience methods
-// mirroring the ABCI interface.
-//
-// Although the ABCI interface (and this manager) passes chunks as byte slices, the internal
-// snapshot/restore APIs use IO streams (i.e. chan io.ReadCloser), for two reasons:
-//
-// 1. In the future, ABCI should support streaming. Consider e.g. InitChain during chain
-// upgrades, which currently passes the entire chain state as an in-memory byte slice.
-// https://github.com/tendermint/tendermint/issues/5184
-//
-// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary
-// errors via io.Pipe.CloseWithError().
-type Manager struct {
-	extensions map[string]types.ExtensionSnapshotter
-	// store is the snapshot store where all completed snapshots are persisted.
-	store *Store
-	opts  types.SnapshotOptions
-	// multistore is the store from which snapshots are taken.
-	multistore types.Snapshotter
-	logger     storetypes.Logger
-
-	mtx               sync.Mutex
-	operation         operation
-	chRestore         chan<- uint32
-	chRestoreDone     <-chan restoreDone
-	restoreSnapshot   *types.Snapshot
-	restoreChunkIndex uint32
-}
-
-// operation represents a Manager operation. Only one operation can be in progress at a time.
-type operation string
-
-// restoreDone represents the result of a restore operation.
-type restoreDone struct {
-	complete bool  // if true, restore completed successfully (not prematurely)
-	err      error // if non-nil, restore errored
-}
-
-const (
-	opNone     operation = ""
-	opSnapshot operation = "snapshot"
-	opPrune    operation = "prune"
-	opRestore  operation = "restore"
-
-	chunkBufferSize   = 4
-	chunkIDBufferSize = 1024
-
-	snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit
-)
-
-var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")
-
-// NewManager creates a new manager.
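-//
-// Illustrative wiring (an editorial sketch, not part of the original file;
-// db.NewMemDB, log.NewNopLogger and the multistore value follow the tests above):
-//
-//	store, err := snapshots.NewStore(db.NewMemDB(), "/tmp/snapshots")
-//	if err != nil {
-//		return err
-//	}
-//	opts := types.NewSnapshotOptions(1000, 2) // snapshot every 1000 heights, keep the 2 most recent
-//	mgr := snapshots.NewManager(store, opts, multistore, nil, log.NewNopLogger())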
-func NewManager(store *Store, opts types.SnapshotOptions, multistore types.Snapshotter, extensions map[string]types.ExtensionSnapshotter, logger storetypes.Logger) *Manager {
-	if extensions == nil {
-		extensions = map[string]types.ExtensionSnapshotter{}
-	}
-	return &Manager{
-		store:      store,
-		opts:       opts,
-		multistore: multistore,
-		extensions: extensions,
-		logger:     logger,
-	}
-}
-
-// RegisterExtensions registers extension snapshotters with the manager.
-func (m *Manager) RegisterExtensions(extensions ...types.ExtensionSnapshotter) error {
-	if m.extensions == nil {
-		m.extensions = make(map[string]types.ExtensionSnapshotter, len(extensions))
-	}
-	for _, extension := range extensions {
-		name := extension.SnapshotName()
-		if _, ok := m.extensions[name]; ok {
-			return fmt.Errorf("duplicated snapshotter name: %s", name)
-		}
-		if !IsFormatSupported(extension, extension.SnapshotFormat()) {
-			return fmt.Errorf("snapshotter does not support its own snapshot format: %s %d", name, extension.SnapshotFormat())
-		}
-		m.extensions[name] = extension
-	}
-	return nil
-}
-
-// begin starts an operation, or errors if one is in progress. It manages the mutex itself.
-func (m *Manager) begin(op operation) error {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	return m.beginLocked(op)
-}
-
-// beginLocked begins an operation while already holding the mutex.
-func (m *Manager) beginLocked(op operation) error {
-	if op == opNone {
-		return errorsmod.Wrap(storetypes.ErrLogic, "can't begin a none operation")
-	}
-	if m.operation != opNone {
-		return errorsmod.Wrapf(storetypes.ErrConflict, "a %v operation is in progress", m.operation)
-	}
-	m.operation = op
-	return nil
-}
-
-// end ends the current operation.
-func (m *Manager) end() {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	m.endLocked()
-}
-
-// endLocked ends the current operation while already holding the mutex.
-func (m *Manager) endLocked() {
-	m.operation = opNone
-	if m.chRestore != nil {
-		close(m.chRestore)
-		m.chRestore = nil
-	}
-	m.chRestoreDone = nil
-	m.restoreSnapshot = nil
-	m.restoreChunkIndex = 0
-}
-
-// GetInterval returns the snapshot interval, expressed in heights.
-func (m *Manager) GetInterval() uint64 {
-	return m.opts.Interval
-}
-
-// GetKeepRecent returns the snapshot keep-recent setting, expressed in heights.
-func (m *Manager) GetKeepRecent() uint32 {
-	return m.opts.KeepRecent
-}
-
-// GetSnapshotBlockRetentionHeights returns the number of heights needed
-// for block retention. Blocks since the oldest available snapshot must be
-// available for state sync nodes to catch up (oldest because a node may be
-// restoring an old snapshot while a new snapshot was taken).
-func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
-	return int64(m.opts.Interval * uint64(m.opts.KeepRecent))
-}
-
-// Create creates a snapshot and returns its metadata.
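-//
-// Illustrative call (an editorial sketch, not part of the original file):
-//
-//	snapshot, err := mgr.Create(5000) // height must exceed the latest stored snapshot
-//	if err != nil {
-//		return err
-//	}
-//	_ = snapshot.Chunks // number of chunks persisted in the snapshot store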
-func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
-	if m == nil {
-		return nil, errorsmod.Wrap(storetypes.ErrLogic, "no snapshot store configured")
-	}
-
-	defer m.multistore.PruneSnapshotHeight(int64(height))
-
-	err := m.begin(opSnapshot)
-	if err != nil {
-		return nil, err
-	}
-	defer m.end()
-
-	latest, err := m.store.GetLatest()
-	if err != nil {
-		return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
-	}
-	if latest != nil && latest.Height >= height {
-		return nil, errorsmod.Wrapf(storetypes.ErrConflict,
-			"a more recent snapshot already exists at height %v", latest.Height)
-	}
-
-	// Spawn goroutine to generate snapshot chunks and pass their io.ReadClosers through a channel
-	ch := make(chan io.ReadCloser)
-	go m.createSnapshot(height, ch)
-
-	return m.store.Save(height, types.CurrentFormat, ch)
-}
-
-// createSnapshot does the heavy work of snapshotting after the request has been validated;
-// the produced chunks are written to the channel.
-func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
-	streamWriter := NewStreamWriter(ch)
-	if streamWriter == nil {
-		return
-	}
-	defer func() {
-		if err := streamWriter.Close(); err != nil {
-			streamWriter.CloseWithError(err)
-		}
-	}()
-
-	if err := m.multistore.Snapshot(height, streamWriter); err != nil {
-		streamWriter.CloseWithError(err)
-		return
-	}
-	for _, name := range m.sortedExtensionNames() {
-		extension := m.extensions[name]
-		// write extension metadata
-		err := streamWriter.WriteMsg(&types.SnapshotItem{
-			Item: &types.SnapshotItem_Extension{
-				Extension: &types.SnapshotExtensionMeta{
-					Name:   name,
-					Format: extension.SnapshotFormat(),
-				},
-			},
-		})
-		if err != nil {
-			streamWriter.CloseWithError(err)
-			return
-		}
-		payloadWriter := func(payload []byte) error {
-			return types.WriteExtensionPayload(streamWriter, payload)
-		}
-		if err := extension.SnapshotExtension(height, payloadWriter); err != nil {
-			streamWriter.CloseWithError(err)
-			return
-		}
-	}
-}
-
-// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations.
-func (m *Manager) List() ([]*types.Snapshot, error) {
-	return m.store.List()
-}
-
-// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called
-// concurrently with other operations. If the chunk does not exist, nil is returned.
-func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) {
-	reader, err := m.store.LoadChunk(height, format, chunk)
-	if err != nil {
-		return nil, err
-	}
-	if reader == nil {
-		return nil, nil
-	}
-	defer reader.Close()
-
-	return io.ReadAll(reader)
-}
-
-// Prune prunes snapshots, if no other operations are in progress.
-func (m *Manager) Prune(retain uint32) (uint64, error) {
-	err := m.begin(opPrune)
-	if err != nil {
-		return 0, err
-	}
-	defer m.end()
-	return m.store.Prune(retain)
-}
-
-// Restore begins an async snapshot restoration, mirroring ABCI OfferSnapshot. Chunks must be fed
-// via RestoreChunk() until the restore is complete or a chunk fails.
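-//
-// Illustrative ABCI-style flow (an editorial sketch, not part of the original
-// file; snapshot and chunks are assumed to arrive from a state sync peer):
-//
-//	if err := mgr.Restore(snapshot); err != nil {
-//		return err
-//	}
-//	for _, chunk := range chunks {
-//		done, err := mgr.RestoreChunk(chunk)
-//		if err != nil {
-//			return err // e.g. types.ErrChunkHashMismatch
-//		}
-//		if done {
-//			break // all chunks verified and applied
-//		}
-//	}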
-func (m *Manager) Restore(snapshot types.Snapshot) error {
-	if snapshot.Chunks == 0 {
-		return errorsmod.Wrap(types.ErrInvalidMetadata, "no chunks")
-	}
-	if uint32(len(snapshot.Metadata.ChunkHashes)) != snapshot.Chunks {
-		return errorsmod.Wrapf(types.ErrInvalidMetadata, "snapshot has %v chunk hashes, but %v chunks",
-			uint32(len(snapshot.Metadata.ChunkHashes)),
-			snapshot.Chunks)
-	}
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	// check the multistore's supported format preemptively
-	if snapshot.Format != types.CurrentFormat {
-		return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format)
-	}
-	if snapshot.Height == 0 {
-		return errorsmod.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
-	}
-	if snapshot.Height > uint64(math.MaxInt64) {
-		return errorsmod.Wrapf(types.ErrInvalidMetadata,
-			"snapshot height %v cannot exceed %v", snapshot.Height, int64(math.MaxInt64))
-	}
-
-	err := m.beginLocked(opRestore)
-	if err != nil {
-		return err
-	}
-
-	// Start an asynchronous snapshot restoration, passing chunks and completion status via channels.
-	chChunkIDs := make(chan uint32, chunkIDBufferSize)
-	chDone := make(chan restoreDone, 1)
-
-	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
-	if err := os.MkdirAll(dir, 0o750); err != nil {
-		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
-	}
-
-	chChunks := m.loadChunkStream(snapshot.Height, snapshot.Format, chChunkIDs)
-
-	go func() {
-		err := m.doRestoreSnapshot(snapshot, chChunks)
-		chDone <- restoreDone{
-			complete: err == nil,
-			err:      err,
-		}
-		close(chDone)
-	}()
-
-	m.chRestore = chChunkIDs
-	m.chRestoreDone = chDone
-	m.restoreSnapshot = &snapshot
-	m.restoreChunkIndex = 0
-	return nil
-}
-
-func (m *Manager) loadChunkStream(height uint64, format uint32, chunkIDs <-chan uint32) <-chan io.ReadCloser {
-	chunks := make(chan io.ReadCloser, chunkBufferSize)
-	go func() {
-		defer close(chunks)
-
-		for chunkID := range chunkIDs {
-			chunk, err := m.store.loadChunkFile(height, format, chunkID)
-			if err != nil {
-				m.logger.Error("load chunk file failed", "height", height, "format", format, "chunk", chunkID, "err", err)
-				break
-			}
-			chunks <- chunk
-		}
-	}()
-
-	return chunks
-}
-
-// doRestoreSnapshot does the heavy work of snapshot restoration after preliminary checks on the request have passed.
-func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.ReadCloser) error {
-	dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format)
-	if err := os.MkdirAll(dir, 0o750); err != nil {
-		return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir)
-	}
-
-	var nextItem types.SnapshotItem
-	streamReader, err := NewStreamReader(chChunks)
-	if err != nil {
-		return err
-	}
-	defer streamReader.Close()
-
-	// payloadReader reads an extension payload for an extension snapshotter; it returns `io.EOF` at extension boundaries.
-	payloadReader := func() ([]byte, error) {
-		nextItem.Reset()
-		if err := streamReader.ReadMsg(&nextItem); err != nil {
-			return nil, err
-		}
-		payload := nextItem.GetExtensionPayload()
-		if payload == nil {
-			return nil, io.EOF
-		}
-		return payload.Payload, nil
-	}
-
-	nextItem, err = m.multistore.Restore(snapshot.Height, snapshot.Format, streamReader)
-	if err != nil {
-		return errorsmod.Wrap(err, "multistore restore")
-	}
-
-	for {
-		if nextItem.Item == nil {
-			// end of stream
-			break
-		}
-		metadata := nextItem.GetExtension()
-		if metadata == nil {
-			return errorsmod.Wrapf(storetypes.ErrLogic, "unknown snapshot item %T", nextItem.Item)
-		}
-		extension, ok := m.extensions[metadata.Name]
-		if !ok {
-			return errorsmod.Wrapf(storetypes.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
-		}
-		if !IsFormatSupported(extension, metadata.Format) {
-			return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
-		}
-
-		if err := extension.RestoreExtension(snapshot.Height, metadata.Format, payloadReader); err != nil {
-			return errorsmod.Wrapf(err, "extension %s restore", metadata.Name)
-		}
-
-		if nextItem.GetExtensionPayload() != nil {
-			return fmt.Errorf("extension %s did not exhaust its payload stream", metadata.Name)
-		}
-	}
-	return nil
-}
-
-// RestoreChunk adds a chunk to an active snapshot restoration, mirroring ABCI ApplySnapshotChunk.
-// Chunks must be given until the restore is complete (returning true) or a chunk errors.
-func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-	if m.operation != opRestore {
-		return false, errorsmod.Wrap(storetypes.ErrLogic, "no restore operation in progress")
-	}
-
-	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
-		return false, errorsmod.Wrap(storetypes.ErrLogic, "received unexpected chunk")
-	}
-
-	// Check if any errors have occurred yet.
-	select {
-	case done := <-m.chRestoreDone:
-		m.endLocked()
-		if done.err != nil {
-			return false, done.err
-		}
-		return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended unexpectedly")
-	default:
-	}
-
-	// Verify the chunk hash.
-	hash := sha256.Sum256(chunk)
-	expected := m.restoreSnapshot.Metadata.ChunkHashes[m.restoreChunkIndex]
-	if !bytes.Equal(hash[:], expected) {
-		return false, errorsmod.Wrapf(types.ErrChunkHashMismatch,
-			"expected %x, got %x", expected, hash)
-	}
-
-	if err := m.store.saveChunkContent(chunk, m.restoreChunkIndex, m.restoreSnapshot); err != nil {
-		return false, errorsmod.Wrapf(err, "save chunk content %d", m.restoreChunkIndex)
-	}
-
-	// Pass the chunk to the restore, and wait for completion if it was the final one.
-	m.chRestore <- m.restoreChunkIndex
-	m.restoreChunkIndex++
-
-	if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
-		close(m.chRestore)
-		m.chRestore = nil
-
-		// The chunks are all written into files, so we can save the snapshot to the db
-		// even if the restoration is not yet complete.
-		if err := m.store.saveSnapshot(m.restoreSnapshot); err != nil {
-			return false, errorsmod.Wrap(err, "save restoring snapshot")
-		}
-
-		done := <-m.chRestoreDone
-		m.endLocked()
-		if done.err != nil {
-			return false, done.err
-		}
-		if !done.complete {
-			return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended prematurely")
-		}
-
-		return true, nil
-	}
-	return false, nil
-}
-
-// RestoreLocalSnapshot restores app state from a local snapshot.
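-//
-// Illustrative call (an editorial sketch, not part of the original file):
-//
-//	// restore from a snapshot that already exists in the local snapshot store
-//	if err := mgr.RestoreLocalSnapshot(5000, types.CurrentFormat); err != nil {
-//		return err
-//	}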
-func (m *Manager) RestoreLocalSnapshot(height uint64, format uint32) error {
-	snapshot, ch, err := m.store.Load(height, format)
-	if err != nil {
-		return err
-	}
-
-	if snapshot == nil {
-		return fmt.Errorf("snapshot doesn't exist, height: %d, format: %d", height, format)
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	err = m.beginLocked(opRestore)
-	if err != nil {
-		return err
-	}
-	defer m.endLocked()
-
-	return m.doRestoreSnapshot(*snapshot, ch)
-}
-
-// sortedExtensionNames sorts extension names for deterministic iteration.
-func (m *Manager) sortedExtensionNames() []string {
-	names := make([]string, 0, len(m.extensions))
-	for name := range m.extensions {
-		names = append(names, name)
-	}
-
-	sort.Strings(names)
-	return names
-}
-
-// IsFormatSupported reports whether the snapshotter supports restoration from the given format.
-func IsFormatSupported(snapshotter types.ExtensionSnapshotter, format uint32) bool {
-	for _, i := range snapshotter.SupportedFormats() {
-		if i == format {
-			return true
-		}
-	}
-	return false
-}
-
-// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height.
-// It also prunes any old snapshots.
-func (m *Manager) SnapshotIfApplicable(height int64) {
-	if m == nil {
-		return
-	}
-	if !m.shouldTakeSnapshot(height) {
-		m.logger.Debug("snapshot is skipped", "height", height)
-		return
-	}
-	// create the snapshot in a background routine
-	go m.snapshot(height)
-}
-
-// shouldTakeSnapshot returns true if a snapshot should be taken at the given height.
-func (m *Manager) shouldTakeSnapshot(height int64) bool {
-	return m.opts.Interval > 0 && uint64(height)%m.opts.Interval == 0
-}
-
-func (m *Manager) snapshot(height int64) {
-	m.logger.Info("creating state snapshot", "height", height)
-
-	if height <= 0 {
-		m.logger.Error("snapshot height must be positive", "height", height)
-		return
-	}
-
-	snapshot, err := m.Create(uint64(height))
-	if err != nil {
-		m.logger.Error("failed to create state snapshot", "height", height, "err", err)
-		return
-	}
-
-	m.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format)
-
-	if m.opts.KeepRecent > 0 {
-		m.logger.Debug("pruning state snapshots")
-
-		pruned, err := m.Prune(m.opts.KeepRecent)
-		if err != nil {
-			m.logger.Error("failed to prune state snapshots", "err", err)
-			return
-		}
-
-		m.logger.Debug("pruned state snapshots", "pruned", pruned)
-	}
-}
-
-// Close the snapshot database.
-func (m *Manager) Close() error { - return m.store.db.Close() -} diff --git a/store/snapshots/manager_test.go b/store/snapshots/manager_test.go deleted file mode 100644 index 49f31e8627..0000000000 --- a/store/snapshots/manager_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package snapshots_test - -import ( - "errors" - "testing" - - db "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/snapshots" - "cosmossdk.io/store/snapshots/types" -) - -var opts = types.NewSnapshotOptions(1500, 2) - -func TestManager_List(t *testing.T) { - store := setupStore(t) - snapshotter := &mockSnapshotter{} - snapshotter.SetSnapshotInterval(opts.Interval) - manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) - require.Equal(t, opts.Interval, snapshotter.GetSnapshotInterval()) - - mgrList, err := manager.List() - require.NoError(t, err) - storeList, err := store.List() - require.NoError(t, err) - - require.NotEmpty(t, storeList) - assert.Equal(t, storeList, mgrList) - - // list should not block or error on busy managers - manager = setupBusyManager(t) - list, err := manager.List() - require.NoError(t, err) - assert.Equal(t, []*types.Snapshot{}, list) - - require.NoError(t, manager.Close()) -} - -func TestManager_LoadChunk(t *testing.T) { - store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockSnapshotter{}, nil, log.NewNopLogger()) - - // Existing chunk should return body - chunk, err := manager.LoadChunk(2, 1, 1) - require.NoError(t, err) - assert.Equal(t, []byte{2, 1, 1}, chunk) - - // Missing chunk should return nil - chunk, err = manager.LoadChunk(2, 1, 9) - require.NoError(t, err) - assert.Nil(t, chunk) - - // LoadChunk should not block or error on busy managers - manager = setupBusyManager(t) - chunk, err = manager.LoadChunk(2, 1, 0) - require.NoError(t, err) - assert.Nil(t, chunk) -} - -func TestManager_Take(t *testing.T) { - store := setupStore(t) - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - snapshotter := &mockSnapshotter{ - items: items, - prunedHeights: make(map[int64]struct{}), - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // nil manager should return error - _, err = (*snapshots.Manager)(nil).Create(1) - require.Error(t, err) - - // creating a snapshot at a lower height than the latest should error - _, err = manager.Create(3) - require.Error(t, err) - _, didPruneHeight := snapshotter.prunedHeights[3] - require.True(t, didPruneHeight) - - // creating a snapshot at a higher height should be fine, and should return it - snapshot, err := manager.Create(5) - require.NoError(t, err) - _, didPruneHeight = snapshotter.prunedHeights[5] - require.True(t, didPruneHeight) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: snapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - 
assert.Equal(t, expectChunks, readChunks(chunks)) - - // creating a snapshot while a different snapshot is being created should error - manager = setupBusyManager(t) - _, err = manager.Create(9) - require.Error(t, err) -} - -func TestManager_Prune(t *testing.T) { - store := setupStore(t) - snapshotter := &mockSnapshotter{} - snapshotter.SetSnapshotInterval(opts.Interval) - manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger()) - - pruned, err := manager.Prune(2) - require.NoError(t, err) - assert.EqualValues(t, 1, pruned) - - list, err := manager.List() - require.NoError(t, err) - assert.Len(t, list, 3) - - // Prune should error while a snapshot is being taken - manager = setupBusyManager(t) - _, err = manager.Prune(2) - require.Error(t, err) -} - -func TestManager_Restore(t *testing.T) { - store := setupStore(t) - target := &mockSnapshotter{ - prunedHeights: make(map[int64]struct{}), - } - extSnapshotter := newExtSnapshotter(0) - manager := snapshots.NewManager(store, opts, target, nil, log.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - expectItems := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - - chunks := snapshotItems(expectItems, newExtSnapshotter(10)) - - // Restore errors on invalid format - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: 0, - Hash: []byte{1, 2, 3}, - Chunks: uint32(len(chunks)), - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - require.ErrorIs(t, err, types.ErrUnknownFormat) - - // Restore errors on no chunks - err = manager.Restore(types.Snapshot{Height: 3, Format: types.CurrentFormat, Hash: []byte{1, 2, 3}}) - require.Error(t, err) - - // Restore errors on chunk and chunkhashes mismatch - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 4, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - - // Starting a restore works - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 1, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.NoError(t, err) - - // While the restore is in progress, any other operations fail - _, err = manager.Create(4) - require.Error(t, err) - _, didPruneHeight := target.prunedHeights[4] - require.True(t, didPruneHeight) - - _, err = manager.Prune(1) - require.Error(t, err) - - // Feeding an invalid chunk should error due to invalid checksum, but not abort restoration. - _, err = manager.RestoreChunk([]byte{9, 9, 9}) - require.Error(t, err) - require.True(t, errors.Is(err, types.ErrChunkHashMismatch)) - - // Feeding the chunks should work - for i, chunk := range chunks { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - assert.Equal(t, expectItems, target.items) - assert.Equal(t, 10, len(extSnapshotter.state)) - - // The snapshot is saved in local snapshot store - snapshots, err := store.List() - require.NoError(t, err) - snapshot := snapshots[0] - require.Equal(t, uint64(3), snapshot.Height) - require.Equal(t, types.CurrentFormat, snapshot.Format) - - // Starting a new restore should fail now, because the target already has contents. 
-	err = manager.Restore(types.Snapshot{
-		Height:   3,
-		Format:   types.CurrentFormat,
-		Hash:     []byte{1, 2, 3},
-		Chunks:   3,
-		Metadata: types.Metadata{ChunkHashes: checksums(chunks)},
-	})
-	require.Error(t, err)
-
-	// But if we clear out the target we should be able to start a new restore.
-	target.items = nil
-	err = manager.Restore(types.Snapshot{
-		Height:   3,
-		Format:   types.CurrentFormat,
-		Hash:     []byte{1, 2, 3},
-		Chunks:   1,
-		Metadata: types.Metadata{ChunkHashes: checksums(chunks)},
-	})
-	require.NoError(t, err)
-}
-
-func TestManager_TakeError(t *testing.T) {
-	snapshotter := &mockErrorSnapshotter{}
-	store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t))
-	require.NoError(t, err)
-	manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger())
-
-	_, err = manager.Create(1)
-	require.Error(t, err)
-}
diff --git a/store/snapshots/store.go b/store/snapshots/store.go
deleted file mode 100644
index c58461a68b..0000000000
--- a/store/snapshots/store.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package snapshots
-
-import (
-	"crypto/sha256"
-	"encoding/binary"
-	"fmt"
-	"hash"
-	"io"
-	"math"
-	"os"
-	"path/filepath"
-	"strconv"
-	"sync"
-
-	db "github.com/cosmos/cosmos-db"
-	"github.com/cosmos/gogoproto/proto"
-
-	"cosmossdk.io/errors"
-	"cosmossdk.io/store/snapshots/types"
-	storetypes "cosmossdk.io/store/types"
-)
-
-const (
-	// keyPrefixSnapshot is the prefix for snapshot database keys
-	keyPrefixSnapshot byte = 0x01
-)
-
-// Store is a snapshot store, containing snapshot metadata and binary chunks.
-type Store struct {
-	db  db.DB
-	dir string
-
-	mtx    sync.Mutex
-	saving map[uint64]bool // heights currently being saved
-}
-
-// NewStore creates a new snapshot store.
-func NewStore(db db.DB, dir string) (*Store, error) {
-	if dir == "" {
-		return nil, errors.Wrap(storetypes.ErrLogic, "snapshot directory not given")
-	}
-	err := os.MkdirAll(dir, 0o755)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir)
-	}
-
-	return &Store{
-		db:     db,
-		dir:    dir,
-		saving: make(map[uint64]bool),
-	}, nil
-}
-
-// Delete deletes a snapshot.
-func (s *Store) Delete(height uint64, format uint32) error {
-	s.mtx.Lock()
-	saving := s.saving[height]
-	s.mtx.Unlock()
-	if saving {
-		return errors.Wrapf(storetypes.ErrConflict,
-			"snapshot for height %v format %v is currently being saved", height, format)
-	}
-	err := s.db.DeleteSync(encodeKey(height, format))
-	if err != nil {
-		return errors.Wrapf(err, "failed to delete snapshot for height %v format %v",
-			height, format)
-	}
-	err = os.RemoveAll(s.pathSnapshot(height, format))
-	return errors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v",
-		height, format)
-}
-
-// Get fetches snapshot info from the database.
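-//
-// Illustrative call (an editorial sketch, not part of the original file):
-//
-//	snapshot, err := store.Get(2, 1) // height 2, format 1; returns nil if absent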
-func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) { - bytes, err := s.db.Get(encodeKey(height, format)) - if err != nil { - return nil, errors.Wrapf(err, "failed to fetch snapshot metadata for height %v format %v", - height, format) - } - if bytes == nil { - return nil, nil - } - snapshot := &types.Snapshot{} - err = proto.Unmarshal(bytes, snapshot) - if err != nil { - return nil, errors.Wrapf(err, "failed to decode snapshot metadata for height %v format %v", - height, format) - } - if snapshot.Metadata.ChunkHashes == nil { - snapshot.Metadata.ChunkHashes = [][]byte{} - } - return snapshot, nil -} - -// GetLatest fetches the latest snapshot from the database, if any. -func (s *Store) GetLatest() (*types.Snapshot, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) - if err != nil { - return nil, errors.Wrap(err, "failed to find latest snapshot") - } - defer iter.Close() - - var snapshot *types.Snapshot - if iter.Valid() { - snapshot = &types.Snapshot{} - err := proto.Unmarshal(iter.Value(), snapshot) - if err != nil { - return nil, errors.Wrap(err, "failed to decode latest snapshot") - } - } - err = iter.Error() - return snapshot, errors.Wrap(err, "failed to find latest snapshot") -} - -// List lists snapshots, in reverse order (newest first). -func (s *Store) List() ([]*types.Snapshot, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) - if err != nil { - return nil, errors.Wrap(err, "failed to list snapshots") - } - defer iter.Close() - - snapshots := make([]*types.Snapshot, 0) - for ; iter.Valid(); iter.Next() { - snapshot := &types.Snapshot{} - err := proto.Unmarshal(iter.Value(), snapshot) - if err != nil { - return nil, errors.Wrap(err, "failed to decode snapshot info") - } - snapshots = append(snapshots, snapshot) - } - return snapshots, iter.Error() -} - -// Load loads a snapshot (both metadata and binary chunks). The chunks must be consumed and closed. -// Returns nil if the snapshot does not exist. -func (s *Store) Load(height uint64, format uint32) (*types.Snapshot, <-chan io.ReadCloser, error) { - snapshot, err := s.Get(height, format) - if snapshot == nil || err != nil { - return nil, nil, err - } - - ch := make(chan io.ReadCloser) - - go func() { - defer close(ch) - for i := uint32(0); i < snapshot.Chunks; i++ { - pr, pw := io.Pipe() - ch <- pr - chunk, err := s.loadChunkFile(height, format, i) - if err != nil { - _ = pw.CloseWithError(err) - return - } - err = func() error { - defer chunk.Close() - - if _, err := io.Copy(pw, chunk); err != nil { - _ = pw.CloseWithError(err) - return fmt.Errorf("failed to copy chunk %d: %w", i, err) - } - - return pw.Close() - }() - if err != nil { - return - } - } - }() - - return snapshot, ch, nil -} - -// LoadChunk loads a chunk from disk, or returns nil if it does not exist. The caller must call -// Close() on it when done. -func (s *Store) LoadChunk(height uint64, format, chunk uint32) (io.ReadCloser, error) { - path := s.PathChunk(height, format, chunk) - file, err := os.Open(path) - if os.IsNotExist(err) { - return nil, nil - } - return file, err -} - -// loadChunkFile loads a chunk from disk, and errors if it does not exist. -func (s *Store) loadChunkFile(height uint64, format, chunk uint32) (io.ReadCloser, error) { - path := s.PathChunk(height, format, chunk) - return os.Open(path) -} - -// Prune removes old snapshots. 
The given number of most recent heights (regardless of format) are retained. -func (s *Store) Prune(retain uint32) (uint64, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) - if err != nil { - return 0, errors.Wrap(err, "failed to prune snapshots") - } - defer iter.Close() - - pruned := uint64(0) - prunedHeights := make(map[uint64]bool) - skip := make(map[uint64]bool) - for ; iter.Valid(); iter.Next() { - height, format, err := decodeKey(iter.Key()) - if err != nil { - return 0, errors.Wrap(err, "failed to prune snapshots") - } - if skip[height] || uint32(len(skip)) < retain { - skip[height] = true - continue - } - err = s.Delete(height, format) - if err != nil { - return 0, errors.Wrap(err, "failed to prune snapshots") - } - pruned++ - prunedHeights[height] = true - } - // Since Delete() deletes a specific format, while we want to prune a height, we clean up - // the height directory as well - for height, ok := range prunedHeights { - if ok { - err = os.Remove(s.pathHeight(height)) - if err != nil { - return 0, errors.Wrapf(err, "failed to remove snapshot directory for height %v", height) - } - } - } - return pruned, iter.Error() -} - -// Save saves a snapshot to disk, returning it. -func (s *Store) Save( - height uint64, format uint32, chunks <-chan io.ReadCloser, -) (*types.Snapshot, error) { - defer DrainChunks(chunks) - if height == 0 { - return nil, errors.Wrap(storetypes.ErrLogic, "snapshot height cannot be 0") - } - - s.mtx.Lock() - saving := s.saving[height] - s.saving[height] = true - s.mtx.Unlock() - if saving { - return nil, errors.Wrapf(storetypes.ErrConflict, - "a snapshot for height %v is already being saved", height) - } - defer func() { - s.mtx.Lock() - delete(s.saving, height) - s.mtx.Unlock() - }() - - exists, err := s.db.Has(encodeKey(height, format)) - if err != nil { - return nil, err - } - if exists { - return nil, errors.Wrapf(storetypes.ErrConflict, - "snapshot already exists for height %v format %v", height, format) - } - - snapshot := &types.Snapshot{ - Height: height, - Format: format, - } - - dirCreated := false - index := uint32(0) - snapshotHasher := sha256.New() - chunkHasher := sha256.New() - for chunkBody := range chunks { - // Only create the snapshot directory on encountering the first chunk. - // If the directory disappears during chunk saving, - // the whole operation will fail anyway. - if !dirCreated { - dir := s.pathSnapshot(height, format) - if err := os.MkdirAll(dir, 0o755); err != nil { - return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir) - } - - dirCreated = true - } - - if err := s.saveChunk(chunkBody, index, snapshot, chunkHasher, snapshotHasher); err != nil { - return nil, err - } - index++ - } - snapshot.Chunks = index - snapshot.Hash = snapshotHasher.Sum(nil) - return snapshot, s.saveSnapshot(snapshot) -} - -// saveChunk saves the given chunkBody with the given index to its appropriate path on disk. -// The hash of the chunk is appended to the snapshot's metadata, -// and the overall snapshot hash is updated with the chunk content too. 
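-//
-// Editorial note, derived from the code below: each chunk hash is
-// sha256(chunk), while the snapshot hash is sha256 over the concatenation of
-// all chunk bodies, e.g. for two chunks c0 and c1:
-//
-//	ChunkHashes = [sha256(c0), sha256(c1)]
-//	Hash        = sha256(c0 || c1)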
-func (s *Store) saveChunk(chunkBody io.ReadCloser, index uint32, snapshot *types.Snapshot, chunkHasher, snapshotHasher hash.Hash) error {
-	defer chunkBody.Close()
-
-	path := s.PathChunk(snapshot.Height, snapshot.Format, index)
-	chunkFile, err := os.Create(path)
-	if err != nil {
-		return errors.Wrapf(err, "failed to create snapshot chunk file %q", path)
-	}
-	defer chunkFile.Close()
-
-	chunkHasher.Reset()
-	if _, err := io.Copy(io.MultiWriter(chunkFile, chunkHasher, snapshotHasher), chunkBody); err != nil {
-		return errors.Wrapf(err, "failed to generate snapshot chunk %d", index)
-	}
-
-	if err := chunkFile.Close(); err != nil {
-		return errors.Wrapf(err, "failed to close snapshot chunk file %d", index)
-	}
-
-	if err := chunkBody.Close(); err != nil {
-		return errors.Wrapf(err, "failed to close snapshot chunk body %d", index)
-	}
-
-	snapshot.Metadata.ChunkHashes = append(snapshot.Metadata.ChunkHashes, chunkHasher.Sum(nil))
-	return nil
-}
-
-// saveChunkContent saves the chunk to disk.
-func (s *Store) saveChunkContent(chunk []byte, index uint32, snapshot *types.Snapshot) error {
-	path := s.PathChunk(snapshot.Height, snapshot.Format, index)
-	return os.WriteFile(path, chunk, 0o600)
-}
-
-// saveSnapshot saves snapshot metadata to the database.
-func (s *Store) saveSnapshot(snapshot *types.Snapshot) error {
-	value, err := proto.Marshal(snapshot)
-	if err != nil {
-		return errors.Wrap(err, "failed to encode snapshot metadata")
-	}
-	err = s.db.SetSync(encodeKey(snapshot.Height, snapshot.Format), value)
-	return errors.Wrap(err, "failed to store snapshot")
-}
-
-// pathHeight generates the path to a height, containing multiple snapshot formats.
-func (s *Store) pathHeight(height uint64) string {
-	return filepath.Join(s.dir, strconv.FormatUint(height, 10))
-}
-
-// pathSnapshot generates a snapshot path, as a specific format under a height.
-func (s *Store) pathSnapshot(height uint64, format uint32) string {
-	return filepath.Join(s.pathHeight(height), strconv.FormatUint(uint64(format), 10))
-}
-
-// PathChunk generates a snapshot chunk path.
-func (s *Store) PathChunk(height uint64, format, chunk uint32) string {
-	return filepath.Join(s.pathSnapshot(height, format), strconv.FormatUint(uint64(chunk), 10))
-}
-
-// decodeKey decodes a snapshot key.
-func decodeKey(k []byte) (uint64, uint32, error) {
-	if len(k) != 13 {
-		return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key with length %v", len(k))
-	}
-	if k[0] != keyPrefixSnapshot {
-		return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key prefix %x", k[0])
-	}
-	height := binary.BigEndian.Uint64(k[1:9])
-	format := binary.BigEndian.Uint32(k[9:13])
-	return height, format, nil
-}
-
-// encodeKey encodes a snapshot key.
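-//
-// Worked example (editorial): height 2, format 1 encodes to 13 bytes,
-// 0x01 | 00 00 00 00 00 00 00 02 | 00 00 00 01
-// (prefix, big-endian uint64 height, big-endian uint32 format), so keys sort
-// by height first and format second.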
-func encodeKey(height uint64, format uint32) []byte {
-	k := make([]byte, 13)
-	k[0] = keyPrefixSnapshot
-	binary.BigEndian.PutUint64(k[1:], height)
-	binary.BigEndian.PutUint32(k[9:], format)
-	return k
-}
diff --git a/store/snapshots/store_test.go b/store/snapshots/store_test.go
deleted file mode 100644
index f4ff0ef74d..0000000000
--- a/store/snapshots/store_test.go
+++ /dev/null
@@ -1,333 +0,0 @@
-package snapshots_test
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"testing"
-	"time"
-
-	db "github.com/cosmos/cosmos-db"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"cosmossdk.io/store/snapshots"
-	"cosmossdk.io/store/snapshots/types"
-)
-
-func setupStore(t *testing.T) *snapshots.Store {
-	t.Helper()
-	store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t))
-	require.NoError(t, err)
-
-	_, err = store.Save(1, 1, makeChunks([][]byte{
-		{1, 1, 0}, {1, 1, 1},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(2, 1, makeChunks([][]byte{
-		{2, 1, 0}, {2, 1, 1},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(2, 2, makeChunks([][]byte{
-		{2, 2, 0}, {2, 2, 1}, {2, 2, 2},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(3, 2, makeChunks([][]byte{
-		{3, 2, 0}, {3, 2, 1}, {3, 2, 2},
-	}))
-	require.NoError(t, err)
-
-	return store
-}
-
-func TestNewStore(t *testing.T) {
-	tempdir := GetTempDir(t)
-	_, err := snapshots.NewStore(db.NewMemDB(), tempdir)
-
-	require.NoError(t, err)
-}
-
-func TestNewStore_ErrNoDir(t *testing.T) {
-	_, err := snapshots.NewStore(db.NewMemDB(), "")
-	require.Error(t, err)
-}
-
-func TestStore_Delete(t *testing.T) {
-	store := setupStore(t)
-	// Deleting a snapshot should remove it
-	err := store.Delete(2, 2)
-	require.NoError(t, err)
-
-	snapshot, err := store.Get(2, 2)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-
-	snapshots, err := store.List()
-	require.NoError(t, err)
-	assert.Len(t, snapshots, 3)
-
-	// Deleting it again should not error
-	err = store.Delete(2, 2)
-	require.NoError(t, err)
-
-	// Deleting a snapshot being saved should error
-	ch := make(chan io.ReadCloser)
-	go func() {
-		_, err := store.Save(9, 1, ch)
-		require.NoError(t, err)
-	}()
-
-	time.Sleep(10 * time.Millisecond)
-	err = store.Delete(9, 1)
-	require.Error(t, err)
-
-	// But after it's saved it should work
-	close(ch)
-	time.Sleep(10 * time.Millisecond)
-	err = store.Delete(9, 1)
-	require.NoError(t, err)
-}
-
-func TestStore_Get(t *testing.T) {
-	store := setupStore(t)
-
-	// Loading a missing snapshot should return nil
-	snapshot, err := store.Get(9, 9)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-
-	// Loading a snapshot should return its metadata
-	snapshot, err = store.Get(2, 1)
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 2,
-		Format: 1,
-		Chunks: 2,
-		Hash:   hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{2, 1, 0}, {2, 1, 1},
-			}),
-		},
-	}, snapshot)
-}
-
-func TestStore_GetLatest(t *testing.T) {
-	store := setupStore(t)
-	// GetLatest should return the most recent snapshot
-	snapshot, err := store.GetLatest()
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 3,
-		Format: 2,
-		Chunks: 3,
-		Hash: hash([][]byte{
-			{3, 2, 0},
-			{3, 2, 1},
-			{3, 2, 2},
-		}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{3, 2, 0},
-				{3, 2, 1},
-				{3, 2, 2},
-			}),
-		},
-	}, snapshot)
-}
-
-func TestStore_List(t *testing.T) {
-	store := setupStore(t)
-	snapshots, err := store.List()
-	require.NoError(t, err)
-
-	require.Equal(t, []*types.Snapshot{
-		{
-			Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})},
-		},
-		{
-			Height: 1, Format: 1, Chunks: 2, Hash: hash([][]byte{{1, 1, 0}, {1, 1, 1}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{1, 1, 0}, {1, 1, 1}})},
-		},
-	}, snapshots)
-}
-
-func TestStore_Load(t *testing.T) {
-	store := setupStore(t)
-	// Loading a missing snapshot should return nil
-	snapshot, chunks, err := store.Load(9, 9)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-	assert.Nil(t, chunks)
-
-	// Loading a snapshot should return its metadata and chunks
-	snapshot, chunks, err = store.Load(2, 1)
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 2,
-		Format: 1,
-		Chunks: 2,
-		Hash:   hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{2, 1, 0}, {2, 1, 1},
-			}),
-		},
-	}, snapshot)
-
-	for i := uint32(0); i < snapshot.Chunks; i++ {
-		reader, ok := <-chunks
-		require.True(t, ok)
-		chunk, err := io.ReadAll(reader)
-		require.NoError(t, err)
-		err = reader.Close()
-		require.NoError(t, err)
-		assert.Equal(t, []byte{2, 1, byte(i)}, chunk)
-	}
-	assert.Empty(t, chunks)
-}
-
-func TestStore_LoadChunk(t *testing.T) {
-	store := setupStore(t)
-	// Loading a missing snapshot should return nil
-	chunk, err := store.LoadChunk(9, 9, 0)
-	require.NoError(t, err)
-	assert.Nil(t, chunk)
-
-	// Loading a missing chunk index should return nil
-	chunk, err = store.LoadChunk(2, 1, 2)
-	require.NoError(t, err)
-	require.Nil(t, chunk)
-
-	// Loading a chunk should return a content reader
-	chunk, err = store.LoadChunk(2, 1, 0)
-	require.NoError(t, err)
-	require.NotNil(t, chunk)
-	body, err := io.ReadAll(chunk)
-	require.NoError(t, err)
-	assert.Equal(t, []byte{2, 1, 0}, body)
-	err = chunk.Close()
-	require.NoError(t, err)
-}
-
-func TestStore_Prune(t *testing.T) {
-	store := setupStore(t)
-	// Pruning too many snapshots should be fine
-	pruned, err := store.Prune(4)
-	require.NoError(t, err)
-	assert.EqualValues(t, 0, pruned)
-
-	snapshots, err := store.List()
-	require.NoError(t, err)
-	assert.Len(t, snapshots, 4)
-
-	// Pruning until the last two heights should leave three snapshots (for two heights)
-	pruned, err = store.Prune(2)
-	require.NoError(t, err)
-	assert.EqualValues(t, 1, pruned)
-
-	snapshots, err = store.List()
-	require.NoError(t, err)
-	require.Equal(t, []*types.Snapshot{
-		{
-			Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})},
-		},
-	}, snapshots)
-
-	// Pruning all heights should also be fine
-	pruned, err = store.Prune(0)
-	require.NoError(t, err)
-	assert.EqualValues(t, 3, pruned)
-
-	snapshots, err = store.List()
-	require.NoError(t, err)
-	assert.Empty(t, snapshots)
-}
-
-func TestStore_Save(t *testing.T) {
-	store := setupStore(t)
-	// Saving a snapshot should work
-	snapshot, err := store.Save(4, 1, makeChunks([][]byte{{1}, {2}}))
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 4,
-		Format: 1,
-		Chunks: 2,
-		Hash:   hash([][]byte{{1}, {2}}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{{1}, {2}}),
-		},
-	}, snapshot)
-	loaded, err := store.Get(snapshot.Height, snapshot.Format)
-	require.NoError(t, err)
-	assert.Equal(t, snapshot, loaded)
-
-	// Saving an existing snapshot should error
-	_, err = store.Save(4, 1, makeChunks([][]byte{{1}, {2}}))
-	require.Error(t, err)
-
-	// Saving at height 0 should error
-	_, err = store.Save(0, 1, makeChunks([][]byte{{1}, {2}}))
-	require.Error(t, err)
-
-	// Saving at format 0 should be fine
-	_, err = store.Save(1, 0, makeChunks([][]byte{{1}, {2}}))
-	require.NoError(t, err)
-
-	// Saving a snapshot with no chunks should be fine, as should loading it
-	_, err = store.Save(5, 1, makeChunks([][]byte{}))
-	require.NoError(t, err)
-	snapshot, chunks, err := store.Load(5, 1)
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{Height: 5, Format: 1, Hash: hash([][]byte{}), Metadata: types.Metadata{ChunkHashes: [][]byte{}}}, snapshot)
-	assert.Empty(t, chunks)
-
-	// Saving a snapshot should error if a chunk reader returns an error, and it should empty out
-	// the channel
-	someErr := errors.New("boom")
-	pr, pw := io.Pipe()
-	err = pw.CloseWithError(someErr)
-	require.NoError(t, err)
-
-	ch := make(chan io.ReadCloser, 2)
-	ch <- pr
-	ch <- io.NopCloser(bytes.NewBuffer([]byte{0xff}))
-	close(ch)
-
-	_, err = store.Save(6, 1, ch)
-	require.Error(t, err)
-	require.True(t, errors.Is(err, someErr))
-	assert.Empty(t, ch)
-
-	// Saving a snapshot should error if a snapshot is already in progress for the same height,
-	// regardless of format. However, a different height should succeed.
-	ch = make(chan io.ReadCloser)
-	go func() {
-		_, err := store.Save(7, 1, ch)
-		require.NoError(t, err)
-	}()
-	time.Sleep(10 * time.Millisecond)
-	_, err = store.Save(7, 2, makeChunks(nil))
-	require.Error(t, err)
-	_, err = store.Save(8, 1, makeChunks(nil))
-	require.NoError(t, err)
-	close(ch)
-}
diff --git a/store/snapshots/stream.go b/store/snapshots/stream.go
deleted file mode 100644
index e010f92244..0000000000
--- a/store/snapshots/stream.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package snapshots
-
-import (
-	"bufio"
-	"compress/zlib"
-	"io"
-
-	protoio "github.com/cosmos/gogoproto/io"
-	"github.com/cosmos/gogoproto/proto"
-
-	"cosmossdk.io/errors"
-)
-
-const (
-	// Do not change the chunk size without a new snapshot format (it must be uniform across nodes)
-	snapshotChunkSize  = uint64(10e6)
-	snapshotBufferSize = int(snapshotChunkSize)
-	// Do not change the compression level without a new snapshot format (it must be uniform across nodes)
-	snapshotCompressionLevel = 7
-)
-
-// StreamWriter sets up a stream pipeline to serialize snapshot nodes:
-// Exported Items -> delimited Protobuf -> zlib -> buffer -> chunkWriter -> chan io.ReadCloser
-type StreamWriter struct {
-	chunkWriter *ChunkWriter
-	bufWriter   *bufio.Writer
-	zWriter     *zlib.Writer
-	protoWriter protoio.WriteCloser
-}
-
-// NewStreamWriter sets up a stream pipeline to serialize snapshot DB records.
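-//
-// Illustrative use of the write pipeline (an editorial sketch, not part of the
-// original file; item is assumed to be a proto.Message):
-//
-//	ch := make(chan io.ReadCloser)
-//	sw := NewStreamWriter(ch) // returns nil if zlib setup failed
-//	if sw == nil {
-//		return
-//	}
-//	if err := sw.WriteMsg(item); err != nil {
-//		sw.CloseWithError(err)
-//		return
-//	}
-//	_ = sw.Close() // flushes buffers and emits the final chunk on ch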
-func NewStreamWriter(ch chan<- io.ReadCloser) *StreamWriter {
-	chunkWriter := NewChunkWriter(ch, snapshotChunkSize)
-	bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize)
-	zWriter, err := zlib.NewWriterLevel(bufWriter, snapshotCompressionLevel)
-	if err != nil {
-		chunkWriter.CloseWithError(errors.Wrap(err, "zlib failure"))
-		return nil
-	}
-	protoWriter := protoio.NewDelimitedWriter(zWriter)
-	return &StreamWriter{
-		chunkWriter: chunkWriter,
-		bufWriter:   bufWriter,
-		zWriter:     zWriter,
-		protoWriter: protoWriter,
-	}
-}
-
-// WriteMsg implements the protoio.Writer interface.
-func (sw *StreamWriter) WriteMsg(msg proto.Message) error {
-	return sw.protoWriter.WriteMsg(msg)
-}
-
-// Close implements the io.Closer interface.
-func (sw *StreamWriter) Close() error {
-	if err := sw.protoWriter.Close(); err != nil {
-		sw.chunkWriter.CloseWithError(err)
-		return err
-	}
-	if err := sw.bufWriter.Flush(); err != nil {
-		sw.chunkWriter.CloseWithError(err)
-		return err
-	}
-	return sw.chunkWriter.Close()
-}
-
-// CloseWithError passes the error to the chunkWriter.
-func (sw *StreamWriter) CloseWithError(err error) {
-	sw.chunkWriter.CloseWithError(err)
-}
-
-// StreamReader sets up a restore stream pipeline:
-// chan io.ReadCloser -> chunkReader -> zlib -> delimited Protobuf -> ExportNode
-type StreamReader struct {
-	chunkReader *ChunkReader
-	zReader     io.ReadCloser
-	protoReader protoio.ReadCloser
-}
-
-// NewStreamReader sets up a restore stream pipeline.
-func NewStreamReader(chunks <-chan io.ReadCloser) (*StreamReader, error) {
-	chunkReader := NewChunkReader(chunks)
-	zReader, err := zlib.NewReader(chunkReader)
-	if err != nil {
-		return nil, errors.Wrap(err, "zlib failure")
-	}
-	protoReader := protoio.NewDelimitedReader(zReader, snapshotMaxItemSize)
-	return &StreamReader{
-		chunkReader: chunkReader,
-		zReader:     zReader,
-		protoReader: protoReader,
-	}, nil
-}
-
-// ReadMsg implements the protoio.Reader interface.
-func (sr *StreamReader) ReadMsg(msg proto.Message) error {
-	return sr.protoReader.ReadMsg(msg)
-}
-
-// Close implements the io.Closer interface.
-func (sr *StreamReader) Close() error {
-	var err error
-	if err1 := sr.protoReader.Close(); err1 != nil {
-		err = err1
-	}
-	if err2 := sr.zReader.Close(); err2 != nil {
-		err = err2
-	}
-	if err3 := sr.chunkReader.Close(); err3 != nil {
-		err = err3
-	}
-	return err
-}
diff --git a/store/snapshots/types/convert.go b/store/snapshots/types/convert.go
deleted file mode 100644
index a5ed10929c..0000000000
--- a/store/snapshots/types/convert.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package types
-
-import (
-	abci "github.com/cometbft/cometbft/api/cometbft/abci/v1"
-	proto "github.com/cosmos/gogoproto/proto"
-
-	"cosmossdk.io/errors"
-)
-
-// SnapshotFromABCI converts an ABCI snapshot to a snapshot. Mainly to decode the SDK metadata.
-func SnapshotFromABCI(in *abci.Snapshot) (Snapshot, error) {
-	snapshot := Snapshot{
-		Height: in.Height,
-		Format: in.Format,
-		Chunks: in.Chunks,
-		Hash:   in.Hash,
-	}
-	err := proto.Unmarshal(in.Metadata, &snapshot.Metadata)
-	if err != nil {
-		return Snapshot{}, errors.Wrap(err, "failed to unmarshal snapshot metadata")
-	}
-	return snapshot, nil
-}
-
-// ToABCI converts a Snapshot to its ABCI representation. Mainly to encode the SDK metadata.
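-//
-// Illustrative round trip (an editorial sketch, not part of the original file):
-//
-//	abciSnap, err := snapshot.ToABCI()              // marshals Metadata into abci.Snapshot.Metadata
-//	back, err2 := types.SnapshotFromABCI(&abciSnap) // decodes it again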
-func (s Snapshot) ToABCI() (abci.Snapshot, error) { - out := abci.Snapshot{ - Height: s.Height, - Format: s.Format, - Chunks: s.Chunks, - Hash: s.Hash, - } - var err error - out.Metadata, err = proto.Marshal(&s.Metadata) - if err != nil { - return abci.Snapshot{}, errors.Wrap(err, "failed to marshal snapshot metadata") - } - return out, nil -} diff --git a/store/snapshots/types/errors.go b/store/snapshots/types/errors.go deleted file mode 100644 index c1b5db532e..0000000000 --- a/store/snapshots/types/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package types - -import ( - "errors" -) - -var ( - // ErrUnknownFormat is returned when an unknown format is used. - ErrUnknownFormat = errors.New("unknown snapshot format") - - // ErrChunkHashMismatch is returned when chunk hash verification failed. - ErrChunkHashMismatch = errors.New("chunk hash verification failed") - - // ErrInvalidMetadata is returned when the snapshot metadata is invalid. - ErrInvalidMetadata = errors.New("invalid snapshot metadata") - - // ErrInvalidSnapshotVersion is returned when the snapshot version is invalid - ErrInvalidSnapshotVersion = errors.New("invalid snapshot version") -) diff --git a/store/snapshots/types/format.go b/store/snapshots/types/format.go deleted file mode 100644 index 317b6a6e32..0000000000 --- a/store/snapshots/types/format.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// CurrentFormat is the currently used format for snapshots. Snapshots using the same format -// must be identical across all nodes for a given height, so this must be bumped when the binary -// snapshot output changes. -const CurrentFormat uint32 = 3 diff --git a/store/snapshots/types/options.go b/store/snapshots/types/options.go deleted file mode 100644 index 9c6ec79a11..0000000000 --- a/store/snapshots/types/options.go +++ /dev/null @@ -1,18 +0,0 @@ -package types - -// SnapshotOptions defines the snapshot strategy used when determining which -// heights are snapshotted for state sync. -type SnapshotOptions struct { - // Interval defines at which heights the snapshot is taken. - Interval uint64 - - // KeepRecent defines how many snapshots to keep in heights. - KeepRecent uint32 -} - -func NewSnapshotOptions(interval uint64, keepRecent uint32) SnapshotOptions { - return SnapshotOptions{ - Interval: interval, - KeepRecent: keepRecent, - } -} diff --git a/store/snapshots/types/snapshot.pb.go b/store/snapshots/types/snapshot.pb.go deleted file mode 100644 index e81660c459..0000000000 --- a/store/snapshots/types/snapshot.pb.go +++ /dev/null @@ -1,2008 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/snapshots/v1/snapshot.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/cosmos-proto" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Snapshot contains Tendermint state sync snapshot info. 
-type Snapshot struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` - Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` - Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{0} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *Snapshot) GetFormat() uint32 { - if m != nil { - return m.Format - } - return 0 -} - -func (m *Snapshot) GetChunks() uint32 { - if m != nil { - return m.Chunks - } - return 0 -} - -func (m *Snapshot) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *Snapshot) GetMetadata() Metadata { - if m != nil { - return m.Metadata - } - return Metadata{} -} - -// Metadata contains SDK-specific snapshot metadata. -type Metadata struct { - ChunkHashes [][]byte `protobuf:"bytes,1,rep,name=chunk_hashes,json=chunkHashes,proto3" json:"chunk_hashes,omitempty"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetChunkHashes() [][]byte { - if m != nil { - return m.ChunkHashes - } - return nil -} - -// SnapshotItem is an item contained in a rootmulti.Store snapshot. -type SnapshotItem struct { - // item is the specific type of snapshot item. 
- // - // Types that are valid to be assigned to Item: - // - // *SnapshotItem_Store - // *SnapshotItem_IAVL - // *SnapshotItem_Extension - // *SnapshotItem_ExtensionPayload - Item isSnapshotItem_Item `protobuf_oneof:"item"` -} - -func (m *SnapshotItem) Reset() { *m = SnapshotItem{} } -func (m *SnapshotItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotItem) ProtoMessage() {} -func (*SnapshotItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{2} -} -func (m *SnapshotItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotItem.Merge(m, src) -} -func (m *SnapshotItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotItem proto.InternalMessageInfo - -type isSnapshotItem_Item interface { - isSnapshotItem_Item() - MarshalTo([]byte) (int, error) - Size() int -} - -type SnapshotItem_Store struct { - Store *SnapshotStoreItem `protobuf:"bytes,1,opt,name=store,proto3,oneof" json:"store,omitempty"` -} -type SnapshotItem_IAVL struct { - IAVL *SnapshotIAVLItem `protobuf:"bytes,2,opt,name=iavl,proto3,oneof" json:"iavl,omitempty"` -} -type SnapshotItem_Extension struct { - Extension *SnapshotExtensionMeta `protobuf:"bytes,3,opt,name=extension,proto3,oneof" json:"extension,omitempty"` -} -type SnapshotItem_ExtensionPayload struct { - ExtensionPayload *SnapshotExtensionPayload `protobuf:"bytes,4,opt,name=extension_payload,json=extensionPayload,proto3,oneof" json:"extension_payload,omitempty"` -} - -func (*SnapshotItem_Store) isSnapshotItem_Item() {} -func (*SnapshotItem_IAVL) isSnapshotItem_Item() {} -func (*SnapshotItem_Extension) isSnapshotItem_Item() {} -func (*SnapshotItem_ExtensionPayload) isSnapshotItem_Item() {} - -func (m *SnapshotItem) GetItem() isSnapshotItem_Item { - if m != nil { - return m.Item - } - return nil -} - -func (m *SnapshotItem) GetStore() *SnapshotStoreItem { - if x, ok := m.GetItem().(*SnapshotItem_Store); ok { - return x.Store - } - return nil -} - -func (m *SnapshotItem) GetIAVL() *SnapshotIAVLItem { - if x, ok := m.GetItem().(*SnapshotItem_IAVL); ok { - return x.IAVL - } - return nil -} - -func (m *SnapshotItem) GetExtension() *SnapshotExtensionMeta { - if x, ok := m.GetItem().(*SnapshotItem_Extension); ok { - return x.Extension - } - return nil -} - -func (m *SnapshotItem) GetExtensionPayload() *SnapshotExtensionPayload { - if x, ok := m.GetItem().(*SnapshotItem_ExtensionPayload); ok { - return x.ExtensionPayload - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SnapshotItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SnapshotItem_Store)(nil), - (*SnapshotItem_IAVL)(nil), - (*SnapshotItem_Extension)(nil), - (*SnapshotItem_ExtensionPayload)(nil), - } -} - -// SnapshotStoreItem contains metadata about a snapshotted store. 
-type SnapshotStoreItem struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *SnapshotStoreItem) Reset() { *m = SnapshotStoreItem{} } -func (m *SnapshotStoreItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotStoreItem) ProtoMessage() {} -func (*SnapshotStoreItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{3} -} -func (m *SnapshotStoreItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotStoreItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotStoreItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotStoreItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotStoreItem.Merge(m, src) -} -func (m *SnapshotStoreItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotStoreItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotStoreItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotStoreItem proto.InternalMessageInfo - -func (m *SnapshotStoreItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// SnapshotIAVLItem is an exported IAVL node. -type SnapshotIAVLItem struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // version is block height - Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` - // height is depth of the tree. - Height int32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *SnapshotIAVLItem) Reset() { *m = SnapshotIAVLItem{} } -func (m *SnapshotIAVLItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotIAVLItem) ProtoMessage() {} -func (*SnapshotIAVLItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{4} -} -func (m *SnapshotIAVLItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotIAVLItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotIAVLItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotIAVLItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotIAVLItem.Merge(m, src) -} -func (m *SnapshotIAVLItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotIAVLItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotIAVLItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotIAVLItem proto.InternalMessageInfo - -func (m *SnapshotIAVLItem) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *SnapshotIAVLItem) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *SnapshotIAVLItem) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *SnapshotIAVLItem) GetHeight() int32 { - if m != nil { - return m.Height - } - return 0 -} - -// SnapshotExtensionMeta contains metadata about an external snapshotter. 
-type SnapshotExtensionMeta struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` -} - -func (m *SnapshotExtensionMeta) Reset() { *m = SnapshotExtensionMeta{} } -func (m *SnapshotExtensionMeta) String() string { return proto.CompactTextString(m) } -func (*SnapshotExtensionMeta) ProtoMessage() {} -func (*SnapshotExtensionMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{5} -} -func (m *SnapshotExtensionMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotExtensionMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotExtensionMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotExtensionMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotExtensionMeta.Merge(m, src) -} -func (m *SnapshotExtensionMeta) XXX_Size() int { - return m.Size() -} -func (m *SnapshotExtensionMeta) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotExtensionMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotExtensionMeta proto.InternalMessageInfo - -func (m *SnapshotExtensionMeta) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *SnapshotExtensionMeta) GetFormat() uint32 { - if m != nil { - return m.Format - } - return 0 -} - -// SnapshotExtensionPayload contains payloads of an external snapshotter. -type SnapshotExtensionPayload struct { - Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (m *SnapshotExtensionPayload) Reset() { *m = SnapshotExtensionPayload{} } -func (m *SnapshotExtensionPayload) String() string { return proto.CompactTextString(m) } -func (*SnapshotExtensionPayload) ProtoMessage() {} -func (*SnapshotExtensionPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{6} -} -func (m *SnapshotExtensionPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotExtensionPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotExtensionPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotExtensionPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotExtensionPayload.Merge(m, src) -} -func (m *SnapshotExtensionPayload) XXX_Size() int { - return m.Size() -} -func (m *SnapshotExtensionPayload) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotExtensionPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotExtensionPayload proto.InternalMessageInfo - -func (m *SnapshotExtensionPayload) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func init() { - proto.RegisterType((*Snapshot)(nil), "cosmos.store.snapshots.v1.Snapshot") - proto.RegisterType((*Metadata)(nil), "cosmos.store.snapshots.v1.Metadata") - proto.RegisterType((*SnapshotItem)(nil), "cosmos.store.snapshots.v1.SnapshotItem") - proto.RegisterType((*SnapshotStoreItem)(nil), "cosmos.store.snapshots.v1.SnapshotStoreItem") - proto.RegisterType((*SnapshotIAVLItem)(nil), "cosmos.store.snapshots.v1.SnapshotIAVLItem") - proto.RegisterType((*SnapshotExtensionMeta)(nil), 
"cosmos.store.snapshots.v1.SnapshotExtensionMeta") - proto.RegisterType((*SnapshotExtensionPayload)(nil), "cosmos.store.snapshots.v1.SnapshotExtensionPayload") -} - -func init() { - proto.RegisterFile("cosmos/store/snapshots/v1/snapshot.proto", fileDescriptor_3d5cca1aa5b69183) -} - -var fileDescriptor_3d5cca1aa5b69183 = []byte{ - // 538 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x41, 0x6f, 0xd3, 0x3c, - 0x18, 0x8e, 0xd7, 0xb4, 0x5f, 0xf7, 0x26, 0x9f, 0xe8, 0xcc, 0x40, 0x61, 0x87, 0x2c, 0x84, 0x4b, - 0x24, 0x68, 0xba, 0x75, 0x88, 0x03, 0xda, 0x85, 0x8a, 0x49, 0xad, 0x00, 0x69, 0xf2, 0x24, 0x84, - 0xb8, 0x54, 0xde, 0x6a, 0x9a, 0xaa, 0x4d, 0x5d, 0xd5, 0x5e, 0x45, 0x8f, 0xfc, 0x03, 0xfe, 0x08, - 0x37, 0x7e, 0xc4, 0x8e, 0x13, 0x27, 0x4e, 0x13, 0x6a, 0xff, 0x02, 0x3f, 0x00, 0xd9, 0x4e, 0x0a, - 0xda, 0x52, 0x34, 0x6e, 0xef, 0xf3, 0xfa, 0x79, 0x1e, 0xfb, 0x7d, 0xec, 0x04, 0xa2, 0x33, 0x2e, - 0x52, 0x2e, 0x1a, 0x42, 0xf2, 0x29, 0x6b, 0x88, 0x31, 0x9d, 0x88, 0x84, 0x4b, 0xd1, 0x98, 0xed, - 0xaf, 0x40, 0x3c, 0x99, 0x72, 0xc9, 0xf1, 0x03, 0xc3, 0x8c, 0x35, 0x33, 0x5e, 0x31, 0xe3, 0xd9, - 0xfe, 0xce, 0x76, 0x9f, 0xf7, 0xb9, 0x66, 0x35, 0x54, 0x65, 0x04, 0x3b, 0x99, 0xa0, 0x6b, 0x16, - 0x32, 0xb5, 0x06, 0xe1, 0x17, 0x04, 0xd5, 0x93, 0xcc, 0x01, 0xdf, 0x87, 0x4a, 0xc2, 0x06, 0xfd, - 0x44, 0x7a, 0x28, 0x40, 0x91, 0x4d, 0x32, 0xa4, 0xfa, 0x1f, 0xf8, 0x34, 0xa5, 0xd2, 0xdb, 0x08, - 0x50, 0xf4, 0x3f, 0xc9, 0x90, 0xea, 0x9f, 0x25, 0xe7, 0xe3, 0xa1, 0xf0, 0x4a, 0xa6, 0x6f, 0x10, - 0xc6, 0x60, 0x27, 0x54, 0x24, 0x9e, 0x1d, 0xa0, 0xc8, 0x25, 0xba, 0xc6, 0x47, 0x50, 0x4d, 0x99, - 0xa4, 0x3d, 0x2a, 0xa9, 0x57, 0x0e, 0x50, 0xe4, 0x34, 0x1f, 0xc5, 0x6b, 0xe7, 0x88, 0xdf, 0x64, - 0xd4, 0x96, 0x7d, 0x71, 0xb5, 0x6b, 0x91, 0x95, 0x34, 0xac, 0x43, 0x35, 0x5f, 0xc3, 0x0f, 0xc1, - 0xd5, 0x1b, 0x76, 0xd5, 0x06, 0x4c, 0x78, 0x28, 0x28, 0x45, 0x2e, 0x71, 0x74, 0xaf, 0xad, 0x5b, - 0xe1, 0xcf, 0x0d, 0x70, 0xf3, 0xf1, 0x3a, 0x92, 0xa5, 0xf8, 0x25, 0x94, 0xf5, 0x76, 0x7a, 0x42, - 0xa7, 0xf9, 0xe4, 0x2f, 0x67, 0xc8, 0x75, 0x27, 0x6a, 0x49, 0x89, 0xdb, 0x16, 0x31, 0x62, 0xfc, - 0x0a, 0xec, 0x01, 0x9d, 0x8d, 0x74, 0x1c, 0x4e, 0xf3, 0xf1, 0x2d, 0x4c, 0x3a, 0x2f, 0xde, 0xbe, - 0x56, 0x1e, 0xad, 0xea, 0xe2, 0x6a, 0xd7, 0x56, 0xa8, 0x6d, 0x11, 0x6d, 0x82, 0x8f, 0x61, 0x93, - 0x7d, 0x94, 0x6c, 0x2c, 0x06, 0x7c, 0xac, 0x83, 0x74, 0x9a, 0x7b, 0xb7, 0x70, 0x3c, 0xca, 0x35, - 0x2a, 0x8f, 0xb6, 0x45, 0x7e, 0x9b, 0xe0, 0x53, 0xd8, 0x5a, 0x81, 0xee, 0x84, 0xce, 0x47, 0x9c, - 0xf6, 0xf4, 0x65, 0x38, 0xcd, 0x83, 0x7f, 0x71, 0x3e, 0x36, 0xd2, 0xb6, 0x45, 0x6a, 0xec, 0x5a, - 0xef, 0xf9, 0xdd, 0x6f, 0x5f, 0xeb, 0x77, 0x8c, 0x57, 0x5d, 0xf4, 0x86, 0xc1, 0x5e, 0xfc, 0xf4, - 0x59, 0xab, 0x02, 0xf6, 0x40, 0xb2, 0x34, 0x3c, 0x84, 0xad, 0x1b, 0xe9, 0xa9, 0x57, 0x31, 0xa6, - 0xa9, 0x49, 0x7e, 0x93, 0xe8, 0xba, 0xd0, 0x25, 0xfc, 0x84, 0xa0, 0x76, 0x3d, 0x37, 0x5c, 0x83, - 0xd2, 0x90, 0xcd, 0xb5, 0xd8, 0x25, 0xaa, 0xc4, 0xdb, 0x50, 0x9e, 0xd1, 0xd1, 0x39, 0xd3, 0xb7, - 0xe0, 0x12, 0x03, 0xb0, 0x07, 0xff, 0xcd, 0xd8, 0x74, 0x95, 0x65, 0x89, 0xe4, 0xf0, 0x8f, 0xd7, - 0xad, 0xa2, 0x28, 0xe7, 0xaf, 0xbb, 0xf8, 0x0c, 0xef, 0xe0, 0x5e, 0x61, 0xd0, 0x45, 0x53, 0xac, - 0xfb, 0x3e, 0x8a, 0x9d, 0x3b, 0xe0, 0xad, 0x0b, 0x5a, 0x1d, 0x3e, 0xbf, 0x2e, 0x33, 0x68, 0x0e, - 0x8b, 0xe3, 0x3e, 0xbc, 0x58, 0xf8, 0xe8, 0x72, 0xe1, 0xa3, 0x1f, 0x0b, 0x1f, 0x7d, 0x5e, 0xfa, - 0xd6, 0xe5, 0xd2, 0xb7, 0xbe, 0x2f, 0x7d, 0xeb, 0x7d, 0x68, 0xa8, 0xa2, 0x37, 0x8c, 0x07, 0xfc, - 0xc6, 0x2f, 0x45, 0xce, 0x27, 0x4c, 
0x9c, 0x56, 0xf4, 0x1f, 0xe0, 0xe0, 0x57, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x57, 0xe2, 0xd5, 0x36, 0x79, 0x04, 0x00, 0x00, -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x22 - } - if m.Chunks != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Chunks)) - i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 - } - if m.Height != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChunkHashes) > 0 { - for iNdEx := len(m.ChunkHashes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ChunkHashes[iNdEx]) - copy(dAtA[i:], m.ChunkHashes[iNdEx]) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.ChunkHashes[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SnapshotItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Item != nil { - { - size := m.Item.Size() - i -= size - if _, err := m.Item.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *SnapshotItem_Store) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_Store) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Store != nil { - { - size, err := m.Store.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_IAVL) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_IAVL) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.IAVL != nil { - { - size, err := m.IAVL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_Extension) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Extension != nil { - { - size, err := m.Extension.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_ExtensionPayload) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_ExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ExtensionPayload != nil { - { - size, err := m.ExtensionPayload.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *SnapshotStoreItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotStoreItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotStoreItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotIAVLItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotIAVLItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotIAVLItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Height != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Version != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x18 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotExtensionMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotExtensionMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotExtensionMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Format != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - 
i, nil -} - -func (m *SnapshotExtensionPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotExtensionPayload) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshot(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovSnapshot(uint64(m.Height)) - } - if m.Format != 0 { - n += 1 + sovSnapshot(uint64(m.Format)) - } - if m.Chunks != 0 { - n += 1 + sovSnapshot(uint64(m.Chunks)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovSnapshot(uint64(l)) - return n -} - -func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChunkHashes) > 0 { - for _, b := range m.ChunkHashes { - l = len(b) - n += 1 + l + sovSnapshot(uint64(l)) - } - } - return n -} - -func (m *SnapshotItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Item != nil { - n += m.Item.Size() - } - return n -} - -func (m *SnapshotItem_Store) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Store != nil { - l = m.Store.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_IAVL) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IAVL != nil { - l = m.IAVL.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_Extension) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Extension != nil { - l = m.Extension.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_ExtensionPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExtensionPayload != nil { - l = m.ExtensionPayload.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotStoreItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} - -func (m *SnapshotIAVLItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.Version != 0 { - n += 1 + sovSnapshot(uint64(m.Version)) - } - if m.Height != 0 { - n += 1 + sovSnapshot(uint64(m.Height)) - } - return n -} - -func (m *SnapshotExtensionMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.Format != 0 { - n += 1 + sovSnapshot(uint64(m.Format)) - } - return n -} - -func (m *SnapshotExtensionPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
len(m.Payload) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} - -func sovSnapshot(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshot(x uint64) (n int) { - return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - m.Chunks = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Chunks |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunkHashes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChunkHashes = append(m.ChunkHashes, make([]byte, postIndex-iNdEx)) - copy(m.ChunkHashes[len(m.ChunkHashes)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotStoreItem{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_Store{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IAVL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotIAVLItem{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_IAVL{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotExtensionMeta{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_Extension{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtensionPayload", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotExtensionPayload{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_ExtensionPayload{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotStoreItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotStoreItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotStoreItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotIAVLItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotIAVLItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotIAVLItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotExtensionMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotExtensionMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotExtensionMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotExtensionPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotExtensionPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotExtensionPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) - if m.Payload == nil { - m.Payload = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnapshot(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnapshot - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnapshot - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnapshot - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/snapshots/types/snapshotter.go b/store/snapshots/types/snapshotter.go deleted file mode 100644 index de9fcfe3d3..0000000000 --- a/store/snapshots/types/snapshotter.go +++ /dev/null @@ -1,56 +0,0 @@ -package types - -import ( - protoio "github.com/cosmos/gogoproto/io" -) - -// Snapshotter is something that can create and restore snapshots, consisting of streamed binary -// chunks - all of which must be read from the channel and closed. 
If an unsupported format is -// given, it must return ErrUnknownFormat (possibly wrapped with fmt.Errorf). -type Snapshotter interface { - // Snapshot writes snapshot items into the protobuf writer. - Snapshot(height uint64, protoWriter protoio.Writer) error - - // PruneSnapshotHeight prunes the given height according to the prune strategy. - // If PruneNothing, this is a no-op. - // For any other strategy, this height is persisted until it is - // less than (latest height - KeepRecent) and height % Interval == 0 - PruneSnapshotHeight(height int64) - - // SetSnapshotInterval sets the interval at which the snapshots are taken. - // It is used by the store that implements the Snapshotter interface - // to determine which heights to retain until after the snapshot is complete. - SetSnapshotInterval(snapshotInterval uint64) - - // Restore restores a state snapshot, taking a reader of the protobuf message stream as input. - Restore(height uint64, format uint32, protoReader protoio.Reader) (SnapshotItem, error) -} - -// ExtensionPayloadReader reads extension payloads; -// it returns io.EOF when it reaches either the end of the stream or an extension boundary. -type ExtensionPayloadReader = func() ([]byte, error) - -// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream. -type ExtensionPayloadWriter = func([]byte) error - -// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream. -// ExtensionSnapshotter has a unique name and manages its own internal formats. -type ExtensionSnapshotter interface { - // SnapshotName returns the name of the snapshotter; it should be unique in the manager. - SnapshotName() string - - // SnapshotFormat returns the default format the extension snapshotter uses to encode the - // payloads when taking a snapshot. - // It's defined within the extension, separate from the global format for the whole state-sync snapshot. - SnapshotFormat() uint32 - - // SupportedFormats returns a list of formats it can restore from. - SupportedFormats() []uint32 - - // SnapshotExtension writes extension payloads into the underlying protobuf stream. - SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error - - // RestoreExtension restores an extension state snapshot; - // the payload reader returns `io.EOF` when it reaches the extension boundary. - RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error -} diff --git a/store/snapshots/types/util.go b/store/snapshots/types/util.go deleted file mode 100644 index 861647088b..0000000000 --- a/store/snapshots/types/util.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -import ( - protoio "github.com/cosmos/gogoproto/io" -) - -// WriteExtensionPayload writes an extension payload for the current extension snapshotter. -func WriteExtensionPayload(protoWriter protoio.Writer, payload []byte) error { - return protoWriter.WriteMsg(&SnapshotItem{ - Item: &SnapshotItem_ExtensionPayload{ - ExtensionPayload: &SnapshotExtensionPayload{ - Payload: payload, - }, - }, - }) -} diff --git a/store/sonar-project.properties b/store/sonar-project.properties deleted file mode 100644 index 008f93fc9f..0000000000 --- a/store/sonar-project.properties +++ /dev/null @@ -1,16 +0,0 @@ -sonar.projectKey=cosmos-sdk-store -sonar.organization=cosmos - -sonar.projectName=Cosmos SDK - Store -sonar.project.monorepo.enabled=true - -sonar.sources=.
-sonar.exclusions=**/*_test.go,**/*.pb.go,**/*.pulsar.go,**/*.pb.gw.go -sonar.coverage.exclusions=**/*_test.go,**/testutil/**,**/*.pb.go,**/*.pb.gw.go,**/*.pulsar.go,test_helpers.go,docs/** -sonar.tests=. -sonar.test.inclusions=**/*_test.go -sonar.go.coverage.reportPaths=coverage.out - -sonar.sourceEncoding=UTF-8 -sonar.scm.provider=git -sonar.scm.forceReloadAll=true diff --git a/store/store.go b/store/store.go deleted file mode 100644 index e02ea24a66..0000000000 --- a/store/store.go +++ /dev/null @@ -1,18 +0,0 @@ -package store - -import ( - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/cache" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/rootmulti" - "cosmossdk.io/store/types" -) - -func NewCommitMultiStore(db dbm.DB, logger types.Logger, metricGatherer metrics.StoreMetrics) types.CommitMultiStore { - return rootmulti.NewStore(db, logger, metricGatherer) -} - -func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache { - return cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize) -} diff --git a/store/streaming/README.md b/store/streaming/README.md deleted file mode 100644 index faa304dec0..0000000000 --- a/store/streaming/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Cosmos-SDK Plugins - -This package contains an extensible plugin system for the Cosmos-SDK. The plugin system leverages the [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) system, which is designed to work over RPC. - -Although `go-plugin` is built to work over RPC, it is currently only designed to work over a local network. - -## Prerequisites - -For an overview of the features supported by the `go-plugin` system, please see https://github.com/hashicorp/go-plugin. The `go-plugin` documentation is located [here](https://github.com/hashicorp/go-plugin/tree/master/docs). You can also directly visit any of the links below: - -* [Writing plugins without Go](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md) -* [Go Plugin Tutorial](https://github.com/hashicorp/go-plugin/blob/master/docs/extensive-go-plugin-tutorial.md) -* [Plugin Internals](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md) -* [Plugin Architecture](https://www.youtube.com/watch?v=SRvm3zQQc1Q) (start here) - -## Exposing plugins - -To expose plugins to the plugin system, you will need to: - -1. Implement the gRPC message protocol service of the plugin -2. Build the plugin binary -3. Export it - -Read the plugin documentation in the [Streaming Plugins](#streaming-plugins) section for examples of how to build a plugin. - -## Streaming Plugins - -List of supported streaming plugins: - -* [ABCI State Streaming Plugin](abci/README.md) diff --git a/store/streaming/abci/README.md b/store/streaming/abci/README.md deleted file mode 100644 index 08aaf12e8a..0000000000 --- a/store/streaming/abci/README.md +++ /dev/null @@ -1,210 +0,0 @@ -# ABCI and State Streaming Plugin (gRPC) - -The `BaseApp` package contains the interface for an [ABCIListener](https://github.com/cosmos/cosmos-sdk/blob/main/baseapp/streaming.go) -service used to write state changes out from individual KVStores to external systems, -as described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-038-state-listening.md). - -Specific `ABCIListener` service implementations are written and loaded as [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) plugins.
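To make the loading model concrete before the implementation details, here is a minimal host-side sketch of how a `go-plugin` client launches a plugin binary and dispenses such a listener over gRPC. It is illustrative only: it assumes the `Handshake` and `ListenerGRPCPlugin` symbols from the `cosmossdk.io/store/streaming/abci` package used in the examples below, the plugin binary path is hypothetical, and the SDK's own loader (described later) performs this wiring for you.

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/hashicorp/go-plugin"

	streamingabci "cosmossdk.io/store/streaming/abci"
)

func main() {
	// Launch the pre-built plugin binary and negotiate the handshake.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig:  streamingabci.Handshake,
		Plugins:          map[string]plugin.Plugin{"abci": &streamingabci.ListenerGRPCPlugin{}},
		Cmd:              exec.Command("./my-abci-plugin"), // hypothetical binary path
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})
	defer client.Kill()

	// Connect over gRPC and dispense the "abci" service.
	rpcClient, err := client.Client()
	if err != nil {
		panic(err)
	}
	raw, err := rpcClient.Dispense("abci")
	if err != nil {
		panic(err)
	}

	// raw can now be asserted to the ABCIListener interface and invoked
	// on every FinalizeBlock/Commit, as described in the sections below.
	fmt.Printf("dispensed ABCI listener plugin: %T\n", raw)
}
```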
- -## Implementation - -In this section we describe the implementation of the `ABCIListener` interface as a gRPC service. - -### Service Protocol - -The companion service protocol for the `ABCIListener` interface is described below. -See [proto/cosmos/store/streaming/abci/grpc.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/store/streaming/abci/grpc.proto) for full details. - -```protobuf reference -https://github.com/cosmos/cosmos-sdk/blob/6cee22df52eb0cbb30e351fbb41f66d26c1f8300/proto/cosmos/store/streaming/abci/grpc.proto#L1-L36 -``` - -### Generating the Code - -To generate the stubs that the local client implementation can call, run the following command: - -```shell -make proto-gen -``` - -For other languages you'll need to [download](https://github.com/cosmos/cosmos-sdk/blob/main/third_party/proto/README.md) -the Cosmos SDK protos into your project and compile them. For language-specific compilation instructions, visit -[https://github.com/grpc](https://github.com/grpc) and look in the `examples` folder of your -language of choice `https://github.com/grpc/grpc-{language}/tree/master/examples`, and see [https://grpc.io](https://grpc.io) -for the documentation. - -### gRPC Client and Server - -Implementing the ABCIListener gRPC client and server is a simple and straightforward process. - -To create the client and server, we create a `ListenerGRPCPlugin` struct that implements the -`plugin.GRPCPlugin` interface and an `Impl` property that will contain a concrete implementation -of the `ABCIListener` plugin written in Go. - -#### The Interface - -The `BaseApp` `ABCIListener` interface is what defines the plugin's capabilities. - -Boilerplate RPC implementation example of the `ABCIListener` interface. ([store/streaming/abci/grpc.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/abci/grpc.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/grpc.go#L13-L79 -``` - -Our `ABCIListener` service plugin. ([store/streaming/plugins/abci/v1/interface.go](interface.go)) - -```go reference -https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/interface.go#L13-L45 -``` - -#### Plugin Implementation - -Plugin implementations can live in a completely separate package but will need access -to the `ABCIListener` interface. Note that plugin implementations -assigned to the `ListenerGRPCPlugin.Impl` property are **only** required when building -plugins in Go; they are pre-compiled into Go modules, and the `GRPCServer.Impl` calls methods -on this out-of-process plugin. - -For Go plugins, this is all that is required to process data that is sent over gRPC. -This makes it possible to quickly write plugins that stream data to different -external systems (e.g. a database, a file, Kafka) without having to implement -the gRPC server endpoints. - -```go -// MyPlugin is the implementation of the ABCIListener interface. -// For Go plugins this is all that is required to process data sent over gRPC. -type MyPlugin struct { - ...
-} - -func (a MyPlugin) ListenFinalizeBlock(ctx context.Context, req abci.FinalizeBlockRequest, res abci.FinalizeBlockResponse) error { - // process data - return nil -} - -func (a MyPlugin) ListenCommit(ctx context.Context, res abci.CommitResponse, changeSet []*store.StoreKVPair) error { - // process data - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: v1.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &ABCIListenerGRPCPlugin{Impl: &MyPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} -``` - -## Plugin Loading System - -The SDK provides a general-purpose plugin loading system that can load not just -the `ABCIListener` service plugin but other protocol services as well. You can take a look -at how plugins are loaded by the SDK in [store/streaming/streaming.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/streaming.go) - -You'll need to add the following to your `app.go`: - -```go -// app.go - -func NewApp(...) *App { - - ... - - // register streaming services - streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey)) - for service := range streamingCfg { - pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingABCIPluginTomlKey) - pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey))) - if len(pluginName) > 0 { - logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel)) - plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel) - if err != nil { - tmos.Exit(err.Error()) - } - if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil { - tmos.Exit(err.Error()) - } - } - } - - ... -} -``` - -## Configuration - -Update the streaming section in `app.toml`: - -```toml -# Streaming allows nodes to stream state to external systems -[streaming] - -# streaming.abci specifies the configuration for the ABCI Listener streaming service -[streaming.abci] - -# List of kv store keys to stream out via gRPC -# Set to ["*"] to expose all keys. -keys = ["*"] - -# The plugin name used for streaming via gRPC -# Supported plugins: abci -plugin = "abci" - -# stop-node-on-err specifies whether to stop the node on message delivery error -stop-node-on-err = true -``` - -## Updating the protocol - -If you update the protocol buffers file, you can regenerate the file and plugins using the -following commands from the project root directory. You do not need to run this if you're -just trying the examples; you can skip ahead to the [Testing](#testing) section. - -```shell -make proto-gen -``` - -* stdout plugin; from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/stdout/stdout streaming/abci/examples/stdout/stdout.go -``` - -* file plugin (writes to `~/`); from inside the `store/` dir, run: - -```shell -go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go -``` - -### Testing - -Export a plugin from one of the Go examples. - -* stdout plugin - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/stdout/stdout" -``` - -* file plugin (writes to ~/) - -```shell -export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/file/file" -``` - -where `{path to}` is the parent path to the `cosmos-sdk` repo on your system.
-
-Test:
-
-```shell
-make test-sim-nondeterminism-streaming
-```
-
-The plugin system looks for the plugin binary in the `COSMOS_SDK_{PLUGIN_NAME}` environment
-variable set above and errors out if it cannot find it. `{PLUGIN_NAME}` is the uppercased
-value of the `streaming.abci.plugin` TOML configuration setting.
diff --git a/store/streaming/abci/examples/file/.gitignore b/store/streaming/abci/examples/file/.gitignore
deleted file mode 100644
index bc8ff79063..0000000000
--- a/store/streaming/abci/examples/file/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# ignore the file plugin binary
-file
\ No newline at end of file
diff --git a/store/streaming/abci/examples/file/README.md b/store/streaming/abci/examples/file/README.md
deleted file mode 100644
index 27e5f8956e..0000000000
--- a/store/streaming/abci/examples/file/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# File Plugin
-
-The file plugin is an example plugin written in Go. It is intended for local testing and should not be used in production environments.
-
-## Build
-
-To build the plugin run the following command:
-
-```shell
-cd store
-```
-
-```shell
-go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go
-```
-
-* The plugin will write files to the user's home directory `~/`.
diff --git a/store/streaming/abci/examples/file/file.go b/store/streaming/abci/examples/file/file.go
deleted file mode 100644
index 061653a555..0000000000
--- a/store/streaming/abci/examples/file/file.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	abci "github.com/cometbft/cometbft/api/cometbft/abci/v1"
-	"github.com/hashicorp/go-plugin"
-
-	streamingabci "cosmossdk.io/store/streaming/abci"
-	store "cosmossdk.io/store/types"
-)
-
-// FilePlugin is the implementation of the baseapp.ABCIListener interface
-// For Go plugins this is all that is required to process data sent over gRPC.
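-// The plugin appends requests, responses, and state-change sets to plain-text
-// files in the user's home directory, one file per message type.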
-type FilePlugin struct {
-	BlockHeight int64
-}
-
-func (a *FilePlugin) writeToFile(file string, data []byte) error {
-	home, err := os.UserHomeDir()
-	if err != nil {
-		return err
-	}
-
-	filename := fmt.Sprintf("%s/%s.txt", home, file)
-	f, err := os.OpenFile(filepath.Clean(filename), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600)
-	if err != nil {
-		return err
-	}
-
-	if _, err := f.Write(data); err != nil {
-		f.Close() // ignore error; Write error takes precedence
-		return err
-	}
-
-	if err := f.Close(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *FilePlugin) ListenFinalizeBlock(ctx context.Context, req abci.FinalizeBlockRequest, res abci.FinalizeBlockResponse) error {
-	d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req))
-	d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res))
-	if err := a.writeToFile("finalize-block-req", d1); err != nil {
-		return err
-	}
-	if err := a.writeToFile("finalize-block-res", d2); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *FilePlugin) ListenCommit(ctx context.Context, res abci.CommitResponse, changeSet []*store.StoreKVPair) error {
-	fmt.Printf("listen-commit: block_height=%d data=%v", res.RetainHeight, changeSet)
-	d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res))
-	d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, changeSet))
-	if err := a.writeToFile("commit-res", d1); err != nil {
-		return err
-	}
-	if err := a.writeToFile("state-change", d2); err != nil {
-		return err
-	}
-	return nil
-}
-
-func main() {
-	plugin.Serve(&plugin.ServeConfig{
-		HandshakeConfig: streamingabci.Handshake,
-		Plugins: map[string]plugin.Plugin{
-			"abci": &streamingabci.ListenerGRPCPlugin{Impl: &FilePlugin{}},
-		},
-
-		// A non-nil value here enables gRPC serving for this streaming...
-		GRPCServer: plugin.DefaultGRPCServer,
-	})
-}
diff --git a/store/streaming/abci/examples/stdout/stdout b/store/streaming/abci/examples/stdout/stdout
deleted file mode 100755
index 93f61a7b93..0000000000
Binary files a/store/streaming/abci/examples/stdout/stdout and /dev/null differ
diff --git a/store/streaming/abci/examples/stdout/stdout.go b/store/streaming/abci/examples/stdout/stdout.go
deleted file mode 100644
index 99bef64147..0000000000
--- a/store/streaming/abci/examples/stdout/stdout.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-
-	abci "github.com/cometbft/cometbft/api/cometbft/abci/v1"
-	"github.com/hashicorp/go-plugin"
-
-	streamingabci "cosmossdk.io/store/streaming/abci"
-	store "cosmossdk.io/store/types"
-)
-
-// StdoutPlugin is the implementation of the ABCIListener interface
-// For Go plugins this is all that is required to process data sent over gRPC.
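-// The plugin simply prints each message to stdout; BlockHeight is captured
-// from the FinalizeBlock request so ListenCommit can report the same height.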
-type StdoutPlugin struct { - BlockHeight int64 -} - -func (a *StdoutPlugin) ListenFinalizeBlock(ctx context.Context, req abci.FinalizeBlockRequest, res abci.FinalizeBlockResponse) error { - a.BlockHeight = req.Height - // process tx messages (i.e: sent to external system) - fmt.Printf("listen-finalize-block: block-height=%d req=%v res=%v", a.BlockHeight, req, res) - return nil -} - -func (a *StdoutPlugin) ListenCommit(ctx context.Context, res abci.CommitResponse, changeSet []*store.StoreKVPair) error { - // process block commit messages (i.e: sent to external system) - fmt.Printf("listen-commit: block_height=%d res=%v data=%v", a.BlockHeight, res, changeSet) - return nil -} - -func main() { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: streamingabci.Handshake, - Plugins: map[string]plugin.Plugin{ - "abci": &streamingabci.ListenerGRPCPlugin{Impl: &StdoutPlugin{}}, - }, - - // A non-nil value here enables gRPC serving for this streaming... - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/store/streaming/abci/grpc.go b/store/streaming/abci/grpc.go deleted file mode 100644 index 89e1c01c2b..0000000000 --- a/store/streaming/abci/grpc.go +++ /dev/null @@ -1,79 +0,0 @@ -package abci - -import ( - "context" - "os" - - abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" - "github.com/hashicorp/go-plugin" - - storetypes "cosmossdk.io/store/types" -) - -var _ storetypes.ABCIListener = (*GRPCClient)(nil) - -// GRPCClient is an implementation of the ABCIListener interface that talks over RPC. -type GRPCClient struct { - client ABCIListenerServiceClient -} - -// ListenFinalizeBlock listens to end block request and responses. -// In addition, it retrieves a types.Context from a context.Context instance. -// It panics if a types.Context was not properly attached. -// When the node is configured to stop on listening errors, -// it will terminate immediately and exit with a non-zero code. -func (m *GRPCClient) ListenFinalizeBlock(goCtx context.Context, req abci.FinalizeBlockRequest, res abci.FinalizeBlockResponse) error { - ctx := goCtx.(storetypes.Context) - sm := ctx.StreamingManager() - request := &ListenFinalizeBlockRequest{Req: &req, Res: &res} - _, err := m.client.ListenFinalizeBlock(goCtx, request) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("FinalizeBlock listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -// ListenCommit listens to commit responses and state changes for the current block. -// In addition, it retrieves a types.Context from a context.Context instance. -// It panics if a types.Context was not properly attached. -// When the node is configured to stop on listening errors, -// it will terminate immediately and exit with a non-zero code. -func (m *GRPCClient) ListenCommit(goCtx context.Context, res abci.CommitResponse, changeSet []*storetypes.StoreKVPair) error { - ctx := goCtx.(storetypes.Context) - sm := ctx.StreamingManager() - request := &ListenCommitRequest{BlockHeight: ctx.BlockHeight(), Res: &res, ChangeSet: changeSet} - _, err := m.client.ListenCommit(goCtx, request) - if err != nil && sm.StopNodeOnErr { - ctx.Logger().Error("Commit listening hook failed", "height", ctx.BlockHeight(), "err", err) - cleanupAndExit() - } - return err -} - -func cleanupAndExit() { - plugin.CleanupClients() - os.Exit(1) -} - -var _ ABCIListenerServiceServer = (*GRPCServer)(nil) - -// GRPCServer is the gRPC server that GRPCClient talks to. 
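-// It runs inside the plugin process and forwards each incoming RPC to the
-// concrete ABCIListener implementation stored in Impl.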
-type GRPCServer struct { - // This is the real implementation - Impl storetypes.ABCIListener -} - -func (m GRPCServer) ListenFinalizeBlock(ctx context.Context, request *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { - if err := m.Impl.ListenFinalizeBlock(ctx, *request.Req, *request.Res); err != nil { - return nil, err - } - return &ListenFinalizeBlockResponse{}, nil -} - -func (m GRPCServer) ListenCommit(ctx context.Context, request *ListenCommitRequest) (*ListenCommitResponse, error) { - if err := m.Impl.ListenCommit(ctx, *request.Res, request.ChangeSet); err != nil { - return nil, err - } - return &ListenCommitResponse{}, nil -} diff --git a/store/streaming/abci/grpc.pb.go b/store/streaming/abci/grpc.pb.go deleted file mode 100644 index b9a8e7622c..0000000000 --- a/store/streaming/abci/grpc.pb.go +++ /dev/null @@ -1,1050 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/streaming/abci/grpc.proto - -package abci - -import ( - context "context" - types "cosmossdk.io/store/types" - fmt "fmt" - v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ListenEndBlockRequest is the request type for the ListenEndBlock RPC method -type ListenFinalizeBlockRequest struct { - Req *v1.FinalizeBlockRequest `protobuf:"bytes,1,opt,name=req,proto3" json:"req,omitempty"` - Res *v1.FinalizeBlockResponse `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` -} - -func (m *ListenFinalizeBlockRequest) Reset() { *m = ListenFinalizeBlockRequest{} } -func (m *ListenFinalizeBlockRequest) String() string { return proto.CompactTextString(m) } -func (*ListenFinalizeBlockRequest) ProtoMessage() {} -func (*ListenFinalizeBlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{0} -} -func (m *ListenFinalizeBlockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenFinalizeBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenFinalizeBlockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenFinalizeBlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenFinalizeBlockRequest.Merge(m, src) -} -func (m *ListenFinalizeBlockRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenFinalizeBlockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenFinalizeBlockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenFinalizeBlockRequest proto.InternalMessageInfo - -func (m *ListenFinalizeBlockRequest) GetReq() *v1.FinalizeBlockRequest { - if m != nil { - return m.Req - } - return nil -} - -func (m *ListenFinalizeBlockRequest) GetRes() *v1.FinalizeBlockResponse { - if m 
!= nil { - return m.Res - } - return nil -} - -// ListenEndBlockResponse is the response type for the ListenEndBlock RPC method -type ListenFinalizeBlockResponse struct { -} - -func (m *ListenFinalizeBlockResponse) Reset() { *m = ListenFinalizeBlockResponse{} } -func (m *ListenFinalizeBlockResponse) String() string { return proto.CompactTextString(m) } -func (*ListenFinalizeBlockResponse) ProtoMessage() {} -func (*ListenFinalizeBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{1} -} -func (m *ListenFinalizeBlockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenFinalizeBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenFinalizeBlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenFinalizeBlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenFinalizeBlockResponse.Merge(m, src) -} -func (m *ListenFinalizeBlockResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenFinalizeBlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenFinalizeBlockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenFinalizeBlockResponse proto.InternalMessageInfo - -// ListenCommitRequest is the request type for the ListenCommit RPC method -type ListenCommitRequest struct { - // explicitly pass in block height as ResponseCommit does not contain this - // info - BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` - Res *v1.CommitResponse `protobuf:"bytes,2,opt,name=res,proto3" json:"res,omitempty"` - ChangeSet []*types.StoreKVPair `protobuf:"bytes,3,rep,name=change_set,json=changeSet,proto3" json:"change_set,omitempty"` -} - -func (m *ListenCommitRequest) Reset() { *m = ListenCommitRequest{} } -func (m *ListenCommitRequest) String() string { return proto.CompactTextString(m) } -func (*ListenCommitRequest) ProtoMessage() {} -func (*ListenCommitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{2} -} -func (m *ListenCommitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenCommitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenCommitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenCommitRequest.Merge(m, src) -} -func (m *ListenCommitRequest) XXX_Size() int { - return m.Size() -} -func (m *ListenCommitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenCommitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenCommitRequest proto.InternalMessageInfo - -func (m *ListenCommitRequest) GetBlockHeight() int64 { - if m != nil { - return m.BlockHeight - } - return 0 -} - -func (m *ListenCommitRequest) GetRes() *v1.CommitResponse { - if m != nil { - return m.Res - } - return nil -} - -func (m *ListenCommitRequest) GetChangeSet() []*types.StoreKVPair { - if m != nil { - return m.ChangeSet - } - return nil -} - -// ListenCommitResponse is the response type for the ListenCommit RPC method -type ListenCommitResponse struct { -} - -func (m *ListenCommitResponse) Reset() { *m = 
ListenCommitResponse{} } -func (m *ListenCommitResponse) String() string { return proto.CompactTextString(m) } -func (*ListenCommitResponse) ProtoMessage() {} -func (*ListenCommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7b98083eb9315fb6, []int{3} -} -func (m *ListenCommitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListenCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListenCommitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListenCommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenCommitResponse.Merge(m, src) -} -func (m *ListenCommitResponse) XXX_Size() int { - return m.Size() -} -func (m *ListenCommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListenCommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenCommitResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ListenFinalizeBlockRequest)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockRequest") - proto.RegisterType((*ListenFinalizeBlockResponse)(nil), "cosmos.store.streaming.abci.ListenFinalizeBlockResponse") - proto.RegisterType((*ListenCommitRequest)(nil), "cosmos.store.streaming.abci.ListenCommitRequest") - proto.RegisterType((*ListenCommitResponse)(nil), "cosmos.store.streaming.abci.ListenCommitResponse") -} - -func init() { - proto.RegisterFile("cosmos/store/streaming/abci/grpc.proto", fileDescriptor_7b98083eb9315fb6) -} - -var fileDescriptor_7b98083eb9315fb6 = []byte{ - // 414 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40, - 0x18, 0xc6, 0x29, 0x4d, 0x4c, 0x1c, 0x38, 0x0d, 0xc6, 0x90, 0xa2, 0x0d, 0x34, 0x06, 0x39, 0x4d, - 0x6d, 0x3d, 0x88, 0xf1, 0xa2, 0x90, 0x18, 0x8d, 0x1e, 0x4c, 0x49, 0x3c, 0x78, 0x21, 0x6d, 0x7d, - 0x2d, 0x13, 0x68, 0xa7, 0xcc, 0x8c, 0x4d, 0xf4, 0x13, 0x78, 0x74, 0x0f, 0xfb, 0x35, 0xf6, 0x73, - 0xec, 0x91, 0xe3, 0x1e, 0x37, 0xf0, 0x45, 0x36, 0x9d, 0x59, 0x08, 0xcd, 0xb2, 0x7f, 0x38, 0xf6, - 0x9d, 0xe7, 0xf7, 0xbc, 0x4f, 0xe7, 0x7d, 0x07, 0xf5, 0x63, 0x26, 0x52, 0x26, 0x5c, 0x21, 0x19, - 0x07, 0x57, 0x48, 0x0e, 0x61, 0x4a, 0xb3, 0xc4, 0x0d, 0xa3, 0x98, 0xba, 0x09, 0xcf, 0x63, 0x92, - 0x73, 0x26, 0x19, 0xee, 0x68, 0x1d, 0x51, 0x3a, 0xb2, 0xd3, 0x91, 0x52, 0x67, 0x3d, 0x8b, 0x59, - 0x0a, 0x32, 0xfa, 0x25, 0x35, 0x56, 0x78, 0xae, 0xfc, 0x93, 0x83, 0xd0, 0xa8, 0xf5, 0xa2, 0xd2, - 0xa2, 0xf0, 0x22, 0x90, 0xa1, 0xe7, 0x2e, 0xa8, 0x90, 0x90, 0x95, 0x16, 0x4a, 0xe5, 0x9c, 0x18, - 0xc8, 0xfa, 0xaa, 0x6a, 0x1f, 0x69, 0x16, 0x2e, 0xe8, 0x5f, 0x18, 0x2d, 0x58, 0x3c, 0x0f, 0x60, - 0xf9, 0x1b, 0x84, 0xc4, 0x43, 0x64, 0x72, 0x58, 0xb6, 0x8d, 0xae, 0x31, 0x68, 0xf8, 0x7d, 0xb2, - 0x6d, 0xa8, 0xfa, 0x93, 0xc2, 0x23, 0x87, 0xa0, 0xa0, 0x44, 0xf0, 0xdb, 0x92, 0x14, 0xed, 0xba, - 0x22, 0x5f, 0xde, 0x4b, 0x8a, 0x9c, 0x65, 0x02, 0x4a, 0x54, 0x38, 0xcf, 0x51, 0xe7, 0x60, 0x24, - 0xad, 0x71, 0xce, 0x0c, 0xd4, 0xd2, 0xe7, 0x63, 0x96, 0xa6, 0x54, 0x6e, 0xb3, 0xf6, 0x50, 0x33, - 0x2a, 0x85, 0xd3, 0x19, 0xd0, 0x64, 0x26, 0x55, 0x68, 0x33, 0x68, 0xa8, 0xda, 0x27, 0x55, 0xc2, - 0xfe, 0x7e, 0xa8, 0xee, 0xcd, 0x50, 0x5b, 0xc3, 0xbd, 0x34, 0xf8, 0x3d, 0x42, 0xf1, 0x2c, 0xcc, - 0x12, 0x98, 0x0a, 0x90, 0x6d, 0xb3, 0x6b, 0x0e, 0x1a, 0x7e, 0x8f, 0x54, 0xe6, 0x72, 0x7d, 0xb9, - 0x64, 0x52, 0x7e, 0x7d, 0xf9, 0xfe, 0x2d, 0xa4, 0x3c, 0x78, 
0xac, 0xa1, 0x09, 0x48, 0xe7, 0x29, - 0x7a, 0x52, 0xcd, 0xab, 0xed, 0xfd, 0xd3, 0x3a, 0x6a, 0x7d, 0x18, 0x8d, 0x3f, 0xeb, 0x43, 0xe0, - 0x13, 0xe0, 0x05, 0x8d, 0x01, 0xff, 0xdb, 0xfd, 0x60, 0xe5, 0x02, 0xf0, 0x1b, 0x72, 0xc7, 0x36, - 0x90, 0xdb, 0xa7, 0x68, 0x0d, 0x8f, 0x07, 0x75, 0x44, 0x2c, 0x50, 0x73, 0x3f, 0x3a, 0x7e, 0xf5, - 0x00, 0xa7, 0xca, 0x54, 0x2c, 0xef, 0x08, 0x42, 0x37, 0x1d, 0xbd, 0x3b, 0x5f, 0xdb, 0xc6, 0x6a, - 0x6d, 0x1b, 0x97, 0x6b, 0xdb, 0xf8, 0xbf, 0xb1, 0x6b, 0xab, 0x8d, 0x5d, 0xbb, 0xd8, 0xd8, 0xb5, - 0x1f, 0x3d, 0xed, 0x25, 0x7e, 0xce, 0x09, 0x65, 0x07, 0x1f, 0x4f, 0xf4, 0x48, 0xed, 0xf5, 0xeb, - 0xab, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xf4, 0x63, 0xc3, 0x62, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ABCIListenerServiceClient is the client API for ABCIListenerService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ABCIListenerServiceClient interface { - // ListenFinalizeBlock is the corresponding endpoint for - // ABCIListener.ListenEndBlock - ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) - // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit - ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) -} - -type aBCIListenerServiceClient struct { - cc grpc1.ClientConn -} - -func NewABCIListenerServiceClient(cc grpc1.ClientConn) ABCIListenerServiceClient { - return &aBCIListenerServiceClient{cc} -} - -func (c *aBCIListenerServiceClient) ListenFinalizeBlock(ctx context.Context, in *ListenFinalizeBlockRequest, opts ...grpc.CallOption) (*ListenFinalizeBlockResponse, error) { - out := new(ListenFinalizeBlockResponse) - err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aBCIListenerServiceClient) ListenCommit(ctx context.Context, in *ListenCommitRequest, opts ...grpc.CallOption) (*ListenCommitResponse, error) { - out := new(ListenCommitResponse) - err := c.cc.Invoke(ctx, "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ABCIListenerServiceServer is the server API for ABCIListenerService service. -type ABCIListenerServiceServer interface { - // ListenFinalizeBlock is the corresponding endpoint for - // ABCIListener.ListenEndBlock - ListenFinalizeBlock(context.Context, *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) - // ListenCommit is the corresponding endpoint for ABCIListener.ListenCommit - ListenCommit(context.Context, *ListenCommitRequest) (*ListenCommitResponse, error) -} - -// UnimplementedABCIListenerServiceServer can be embedded to have forward compatible implementations. 
-type UnimplementedABCIListenerServiceServer struct { -} - -func (*UnimplementedABCIListenerServiceServer) ListenFinalizeBlock(ctx context.Context, req *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenFinalizeBlock not implemented") -} -func (*UnimplementedABCIListenerServiceServer) ListenCommit(ctx context.Context, req *ListenCommitRequest) (*ListenCommitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListenCommit not implemented") -} - -func RegisterABCIListenerServiceServer(s grpc1.Server, srv ABCIListenerServiceServer) { - s.RegisterService(&_ABCIListenerService_serviceDesc, srv) -} - -func _ABCIListenerService_ListenFinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenFinalizeBlockRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenFinalizeBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIListenerServiceServer).ListenFinalizeBlock(ctx, req.(*ListenFinalizeBlockRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ABCIListenerService_ListenCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListenCommitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIListenerServiceServer).ListenCommit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cosmos.store.streaming.abci.ABCIListenerService/ListenCommit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIListenerServiceServer).ListenCommit(ctx, req.(*ListenCommitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _ABCIListenerService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cosmos.store.streaming.abci.ABCIListenerService", - HandlerType: (*ABCIListenerServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ListenFinalizeBlock", - Handler: _ABCIListenerService_ListenFinalizeBlock_Handler, - }, - { - MethodName: "ListenCommit", - Handler: _ABCIListenerService_ListenCommit_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "cosmos/store/streaming/abci/grpc.proto", -} - -func (m *ListenFinalizeBlockRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenFinalizeBlockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenFinalizeBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Res != nil { - { - size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Req != nil { - { - size, err := m.Req.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListenFinalizeBlockResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenFinalizeBlockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenFinalizeBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ListenCommitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenCommitRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenCommitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChangeSet) > 0 { - for iNdEx := len(m.ChangeSet) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ChangeSet[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Res != nil { - { - size, err := m.Res.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGrpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.BlockHeight != 0 { - i = encodeVarintGrpc(dAtA, i, uint64(m.BlockHeight)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ListenCommitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListenCommitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListenCommitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintGrpc(dAtA []byte, offset int, v uint64) int { - offset -= sovGrpc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ListenFinalizeBlockRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Req != nil { - l = m.Req.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - if m.Res != nil { - l = m.Res.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - return n -} - -func (m *ListenFinalizeBlockResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ListenCommitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BlockHeight != 0 { - n += 1 + sovGrpc(uint64(m.BlockHeight)) - } - if m.Res != nil { - l = m.Res.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - if len(m.ChangeSet) > 0 { - for _, e := range m.ChangeSet { - l = e.Size() - n += 1 + l + sovGrpc(uint64(l)) - } - } - return n -} - -func (m *ListenCommitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovGrpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGrpc(x uint64) (n int) { - return sovGrpc(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) -} -func (m *ListenFinalizeBlockRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenFinalizeBlockRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenFinalizeBlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Req", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Req == nil { - m.Req = &v1.FinalizeBlockRequest{} - } - if err := m.Req.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Res == nil { - m.Res = &v1.FinalizeBlockResponse{} - } - if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenFinalizeBlockResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenFinalizeBlockResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenFinalizeBlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } 
- return nil -} -func (m *ListenCommitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenCommitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenCommitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockHeight", wireType) - } - m.BlockHeight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BlockHeight |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Res", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Res == nil { - m.Res = &v1.CommitResponse{} - } - if err := m.Res.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChangeSet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGrpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGrpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChangeSet = append(m.ChangeSet, &types.StoreKVPair{}) - if err := m.ChangeSet[len(m.ChangeSet)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListenCommitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGrpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListenCommitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListenCommitResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGrpc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGrpc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGrpc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGrpc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGrpc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGrpc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGrpc - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGrpc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGrpc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGrpc = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/streaming/abci/interface.go b/store/streaming/abci/interface.go deleted file mode 100644 index cecc1b0ad0..0000000000 --- a/store/streaming/abci/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package abci contains shared data between the host and plugins. -package abci - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - storetypes "cosmossdk.io/store/types" -) - -// Handshake is a common handshake that is shared by streaming and host. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. -var Handshake = plugin.HandshakeConfig{ - // This isn't required when using VersionedPlugins - ProtocolVersion: 1, - MagicCookieKey: "ABCI_LISTENER_PLUGIN", - MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345", -} - -var _ plugin.GRPCPlugin = (*ListenerGRPCPlugin)(nil) - -// ListenerGRPCPlugin is the implementation of plugin.GRPCPlugin, so we can serve/consume this. -type ListenerGRPCPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- Impl storetypes.ABCIListener -} - -func (p *ListenerGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - RegisterABCIListenerServiceServer(s, &GRPCServer{Impl: p.Impl}) - return nil -} - -func (p *ListenerGRPCPlugin) GRPCClient( - _ context.Context, - _ *plugin.GRPCBroker, - c *grpc.ClientConn, -) (interface{}, error) { - return &GRPCClient{client: NewABCIListenerServiceClient(c)}, nil -} diff --git a/store/streaming/streaming.go b/store/streaming/streaming.go deleted file mode 100644 index f553fd16a4..0000000000 --- a/store/streaming/streaming.go +++ /dev/null @@ -1,79 +0,0 @@ -package streaming - -import ( - "fmt" - "os" - "os/exec" - "strings" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - streamingabci "cosmossdk.io/store/streaming/abci" -) - -const pluginEnvKeyPrefix = "COSMOS_SDK" - -// HandshakeMap contains a map of each supported streaming's handshake config -var HandshakeMap = map[string]plugin.HandshakeConfig{ - "abci": streamingabci.Handshake, -} - -// PluginMap contains a map of supported gRPC plugins -var PluginMap = map[string]plugin.Plugin{ - "abci": &streamingabci.ListenerGRPCPlugin{}, -} - -func GetPluginEnvKey(name string) string { - return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name)) -} - -func NewStreamingPlugin(name, logLevel string) (interface{}, error) { - logger := hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: toHclogLevel(logLevel), - Name: fmt.Sprintf("plugin.%s", name), - }) - - // We're a host. Start by launching the streaming process. - env := os.Getenv(GetPluginEnvKey(name)) - client := plugin.NewClient(&plugin.ClientConfig{ - HandshakeConfig: HandshakeMap[name], - Managed: true, - Plugins: PluginMap, - // For verifying the integrity of executables see SecureConfig documentation - // https://pkg.go.dev/github.com/hashicorp/go-plugin#SecureConfig - //#nosec G204 -- Required to load plugins - Cmd: exec.Command("sh", "-c", env), - Logger: logger, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, plugin.ProtocolGRPC, - }, - }) - - // Connect via RPC - rpcClient, err := client.Client() - if err != nil { - return nil, err - } - - // Request streaming plugin - return rpcClient.Dispense(name) -} - -func toHclogLevel(s string) hclog.Level { - switch s { - case "trace": - return hclog.Trace - case "debug": - return hclog.Debug - case "info": - return hclog.Info - case "warn": - return hclog.Warn - case "error": - return hclog.Error - default: - return hclog.DefaultLevel - } -} diff --git a/store/streaming/streaming_test.go b/store/streaming/streaming_test.go deleted file mode 100644 index 75fcec937b..0000000000 --- a/store/streaming/streaming_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package streaming - -import ( - "context" - "fmt" - "os" - "runtime" - "testing" - "time" - - abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" - cmtproto "github.com/cometbft/cometbft/api/cometbft/types/v1" - "github.com/cosmos/gogoproto/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "cosmossdk.io/log" - storetypes "cosmossdk.io/store/types" -) - -type PluginTestSuite struct { - suite.Suite - - loggerCtx MockContext - - workDir string - - finalizeBlockReq abci.FinalizeBlockRequest - finalizeBlockRes abci.FinalizeBlockResponse - commitRes abci.CommitResponse - - changeSet []*storetypes.StoreKVPair -} - -func (s *PluginTestSuite) SetupTest() { - if runtime.GOOS != "linux" { - s.T().Skip("only run on 
linux") - } - - path, err := os.Getwd() - if err != nil { - s.T().Fail() - } - s.workDir = path - - pluginVersion := "abci" - // to write data to files, replace stdout/stdout => file/file - pluginPath := fmt.Sprintf("%s/abci/examples/stdout/stdout", s.workDir) - if err := os.Setenv(GetPluginEnvKey(pluginVersion), pluginPath); err != nil { - s.T().Fail() - } - - raw, err := NewStreamingPlugin(pluginVersion, "trace") - require.NoError(s.T(), err, "load", "streaming", "unexpected error") - - abciListener, ok := raw.(storetypes.ABCIListener) - require.True(s.T(), ok, "should pass type check") - - header := cmtproto.Header{Height: 1, Time: time.Now()} - logger := log.NewNopLogger() - streamingService := storetypes.StreamingManager{ - ABCIListeners: []storetypes.ABCIListener{abciListener}, - StopNodeOnErr: true, - } - s.loggerCtx = NewMockContext(header, logger, streamingService) - - // test abci message types - - s.finalizeBlockReq = abci.FinalizeBlockRequest{ - Height: s.loggerCtx.BlockHeight(), - Txs: [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}, - Misbehavior: []abci.Misbehavior{}, - Hash: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, - DecidedLastCommit: abci.CommitInfo{}, - } - s.finalizeBlockRes = abci.FinalizeBlockResponse{ - Events: []abci.Event{}, - ConsensusParamUpdates: &cmtproto.ConsensusParams{}, - ValidatorUpdates: []abci.ValidatorUpdate{}, - TxResults: []*abci.ExecTxResult{{ - Events: []abci.Event{}, - Code: 1, - Codespace: "mockCodeSpace", - Data: []byte{5, 6, 7, 8}, - GasUsed: 2, - GasWanted: 3, - Info: "mockInfo", - Log: "mockLog", - }}, - } - s.commitRes = abci.CommitResponse{} - - // test store kv pair types - for range [2000]int{} { - s.changeSet = append(s.changeSet, &storetypes.StoreKVPair{ - StoreKey: "mockStore", - Delete: false, - Key: []byte{1, 2, 3}, - Value: []byte{3, 2, 1}, - }) - } -} - -func TestPluginTestSuite(t *testing.T) { - suite.Run(t, new(PluginTestSuite)) -} - -func (s *PluginTestSuite) TestABCIGRPCPlugin() { - s.T().Run("Should successfully load streaming", func(t *testing.T) { - abciListeners := s.loggerCtx.StreamingManager().ABCIListeners - for _, abciListener := range abciListeners { - for i := range [50]int{} { - - err := abciListener.ListenFinalizeBlock(s.loggerCtx, s.finalizeBlockReq, s.finalizeBlockRes) - assert.NoError(t, err, "ListenEndBlock") - - err = abciListener.ListenCommit(s.loggerCtx, s.commitRes, s.changeSet) - assert.NoError(t, err, "ListenCommit") - - s.updateHeight(int64(i + 1)) - } - } - }) -} - -func (s *PluginTestSuite) updateHeight(n int64) { - header := s.loggerCtx.BlockHeader() - header.Height = n - s.loggerCtx = NewMockContext(header, s.loggerCtx.Logger(), s.loggerCtx.StreamingManager()) -} - -var ( - _ context.Context = MockContext{} - _ storetypes.Context = MockContext{} -) - -type MockContext struct { - baseCtx context.Context - header cmtproto.Header - logger log.Logger - streamingManager storetypes.StreamingManager -} - -func (m MockContext) BlockHeight() int64 { return m.header.Height } -func (m MockContext) Logger() log.Logger { return m.logger } -func (m MockContext) StreamingManager() storetypes.StreamingManager { return m.streamingManager } - -func (m MockContext) BlockHeader() cmtproto.Header { - msg := proto.Clone(&m.header).(*cmtproto.Header) - return *msg -} - -func NewMockContext(header cmtproto.Header, logger log.Logger, sm storetypes.StreamingManager) MockContext { - header.Time = header.Time.UTC() - return MockContext{ - baseCtx: context.Background(), - header: header, - logger: logger, - streamingManager: sm, - 
} -} - -func (m MockContext) Deadline() (deadline time.Time, ok bool) { - return m.baseCtx.Deadline() -} - -func (m MockContext) Done() <-chan struct{} { - return m.baseCtx.Done() -} - -func (m MockContext) Err() error { - return m.baseCtx.Err() -} - -func (m MockContext) Value(key any) any { - return m.baseCtx.Value(key) -} diff --git a/store/tracekv/store.go b/store/tracekv/store.go deleted file mode 100644 index ba6df431da..0000000000 --- a/store/tracekv/store.go +++ /dev/null @@ -1,202 +0,0 @@ -package tracekv - -import ( - "encoding/base64" - "encoding/json" - "io" - - "cosmossdk.io/errors" - "cosmossdk.io/store/types" -) - -const ( - writeOp operation = "write" - readOp operation = "read" - deleteOp operation = "delete" - iterKeyOp operation = "iterKey" - iterValueOp operation = "iterValue" -) - -type ( - // Store implements the KVStore interface with tracing enabled. - // Operations are traced on each core KVStore call and written to the - // underlying io.writer. - // - // TODO: Should we use a buffered writer and implement Commit on - // Store? - Store struct { - parent types.KVStore - writer io.Writer - context types.TraceContext - } - - // operation represents an IO operation - operation string - - // traceOperation implements a traced KVStore operation - traceOperation struct { - Operation operation `json:"operation"` - Key string `json:"key"` - Value string `json:"value"` - Metadata map[string]interface{} `json:"metadata"` - } -) - -// NewStore returns a reference to a new traceKVStore given a parent -// KVStore implementation and a buffered writer. -func NewStore(parent types.KVStore, writer io.Writer, tc types.TraceContext) *Store { - return &Store{parent: parent, writer: writer, context: tc} -} - -// Get implements the KVStore interface. It traces a read operation and -// delegates a Get call to the parent KVStore. -func (tkv *Store) Get(key []byte) []byte { - value := tkv.parent.Get(key) - - writeOperation(tkv.writer, readOp, tkv.context, key, value) - return value -} - -// Set implements the KVStore interface. It traces a write operation and -// delegates the Set call to the parent KVStore. -func (tkv *Store) Set(key, value []byte) { - types.AssertValidKey(key) - writeOperation(tkv.writer, writeOp, tkv.context, key, value) - tkv.parent.Set(key, value) -} - -// Delete implements the KVStore interface. It traces a write operation and -// delegates the Delete call to the parent KVStore. -func (tkv *Store) Delete(key []byte) { - writeOperation(tkv.writer, deleteOp, tkv.context, key, nil) - tkv.parent.Delete(key) -} - -// Has implements the KVStore interface. It delegates the Has call to the -// parent KVStore. -func (tkv *Store) Has(key []byte) bool { - return tkv.parent.Has(key) -} - -// Iterator implements the KVStore interface. It delegates the Iterator call -// to the parent KVStore. -func (tkv *Store) Iterator(start, end []byte) types.Iterator { - return tkv.iterator(start, end, true) -} - -// ReverseIterator implements the KVStore interface. It delegates the -// ReverseIterator call to the parent KVStore. -func (tkv *Store) ReverseIterator(start, end []byte) types.Iterator { - return tkv.iterator(start, end, false) -} - -// iterator facilitates iteration over a KVStore. It delegates the necessary -// calls to it's parent KVStore. 
-func (tkv *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator - - if ascending { - parent = tkv.parent.Iterator(start, end) - } else { - parent = tkv.parent.ReverseIterator(start, end) - } - - return newTraceIterator(tkv.writer, parent, tkv.context) -} - -type traceIterator struct { - parent types.Iterator - writer io.Writer - context types.TraceContext -} - -func newTraceIterator(w io.Writer, parent types.Iterator, tc types.TraceContext) types.Iterator { - return &traceIterator{writer: w, parent: parent, context: tc} -} - -// Domain implements the Iterator interface. -func (ti *traceIterator) Domain() (start, end []byte) { - return ti.parent.Domain() -} - -// Valid implements the Iterator interface. -func (ti *traceIterator) Valid() bool { - return ti.parent.Valid() -} - -// Next implements the Iterator interface. -func (ti *traceIterator) Next() { - ti.parent.Next() -} - -// Key implements the Iterator interface. -func (ti *traceIterator) Key() []byte { - key := ti.parent.Key() - - writeOperation(ti.writer, iterKeyOp, ti.context, key, nil) - return key -} - -// Value implements the Iterator interface. -func (ti *traceIterator) Value() []byte { - value := ti.parent.Value() - - writeOperation(ti.writer, iterValueOp, ti.context, nil, value) - return value -} - -// Close implements the Iterator interface. -func (ti *traceIterator) Close() error { - return ti.parent.Close() -} - -// Error delegates the Error call to the parent iterator. -func (ti *traceIterator) Error() error { - return ti.parent.Error() -} - -// GetStoreType implements the KVStore interface. It returns the underlying -// KVStore type. -func (tkv *Store) GetStoreType() types.StoreType { - return tkv.parent.GetStoreType() -} - -// CacheWrap implements the KVStore interface. It panics because a Store -// cannot be branched. -func (tkv *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a TraceKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be branched. -func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a TraceKVStore") -} - -// writeOperation writes a KVStore operation to the underlying io.Writer as -// JSON-encoded data where the key/value pair is base64 encoded. 
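-// A written trace line looks like (taken from the tests below):
-// {"operation":"write","key":"a2V5MDAwMDAwMDE=","value":"dmFsdWUwMDAwMDAwMQ==","metadata":{"blockHeight":64}}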
-func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { - traceOp := traceOperation{ - Operation: op, - Key: base64.StdEncoding.EncodeToString(key), - Value: base64.StdEncoding.EncodeToString(value), - } - - if tc != nil { - traceOp.Metadata = tc - } - - raw, err := json.Marshal(traceOp) - if err != nil { - panic(errors.Wrap(err, "failed to serialize trace operation")) - } - - if _, err := w.Write(raw); err != nil { - panic(errors.Wrap(err, "failed to write trace operation")) - } - - _, err = io.WriteString(w, "\n") - if err != nil { - panic(errors.Wrap(err, "failed to write newline")) - } -} diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go deleted file mode 100644 index 2c42734bae..0000000000 --- a/store/tracekv/store_test.go +++ /dev/null @@ -1,292 +0,0 @@ -package tracekv_test - -import ( - "bytes" - "fmt" - "io" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/prefix" - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" -) - -func bz(s string) []byte { return []byte(s) } - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -var kvPairs = []kv.Pair{ - {Key: keyFmt(1), Value: valFmt(1)}, - {Key: keyFmt(2), Value: valFmt(2)}, - {Key: keyFmt(3), Value: valFmt(3)}, -} - -func newTraceKVStore(w io.Writer) *tracekv.Store { - store := newEmptyTraceKVStore(w) - - for _, kvPair := range kvPairs { - store.Set(kvPair.Key, kvPair.Value) - } - - return store -} - -func newEmptyTraceKVStore(w io.Writer) *tracekv.Store { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) - - return tracekv.NewStore(memDB, w, tc) -} - -func TestTraceKVStoreGet(t *testing.T) { - testCases := []struct { - key []byte - expectedValue []byte - expectedOut string - }{ - { - key: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - expectedOut: "{\"operation\":\"read\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - key: []byte("does-not-exist"), - expectedValue: nil, - expectedOut: "{\"operation\":\"read\",\"key\":\"ZG9lcy1ub3QtZXhpc3Q=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - }, - } - - for _, tc := range testCases { - var buf bytes.Buffer - - store := newTraceKVStore(&buf) - buf.Reset() - value := store.Get(tc.key) - - require.Equal(t, tc.expectedValue, value) - require.Equal(t, tc.expectedOut, buf.String()) - } -} - -func TestTraceKVStoreSet(t *testing.T) { - testCases := []struct { - key []byte - value []byte - expectedOut string - }{ - { - key: kvPairs[0].Key, - value: kvPairs[0].Value, - expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - key: kvPairs[1].Key, - value: kvPairs[1].Value, - expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - key: kvPairs[2].Key, - value: kvPairs[2].Value, - expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - } - - for _, tc := range testCases { - var buf bytes.Buffer - - store := newEmptyTraceKVStore(&buf) - buf.Reset() - store.Set(tc.key, tc.value) - - 
require.Equal(t, tc.expectedOut, buf.String()) - } - - var buf bytes.Buffer - store := newEmptyTraceKVStore(&buf) - require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") - require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic") -} - -func TestTraceKVStoreDelete(t *testing.T) { - testCases := []struct { - key []byte - expectedOut string - }{ - { - key: kvPairs[0].Key, - expectedOut: "{\"operation\":\"delete\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - }, - } - - for _, tc := range testCases { - var buf bytes.Buffer - - store := newTraceKVStore(&buf) - buf.Reset() - store.Delete(tc.key) - - require.Equal(t, tc.expectedOut, buf.String()) - } -} - -func TestTraceKVStoreHas(t *testing.T) { - testCases := []struct { - key []byte - expected bool - }{ - { - key: kvPairs[0].Key, - expected: true, - }, - } - - for _, tc := range testCases { - var buf bytes.Buffer - - store := newTraceKVStore(&buf) - buf.Reset() - ok := store.Has(tc.key) - - require.Equal(t, tc.expected, ok) - } -} - -func TestTestTraceKVStoreIterator(t *testing.T) { - var buf bytes.Buffer - - store := newTraceKVStore(&buf) - iterator := store.Iterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - expectedKeyOut string - expectedvalueOut string - }{ - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - } - - for _, tc := range testCases { - buf.Reset() - ka := iterator.Key() - require.Equal(t, tc.expectedKeyOut, buf.String()) - - buf.Reset() - va := iterator.Value() - require.Equal(t, tc.expectedvalueOut, buf.String()) - - require.Equal(t, tc.expectedKey, ka) - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestTestTraceKVStoreReverseIterator(t *testing.T) { - var buf bytes.Buffer - - store := newTraceKVStore(&buf) - iterator := store.ReverseIterator(nil, nil) - - s, e := iterator.Domain() - require.Equal(t, []byte(nil), s) - require.Equal(t, []byte(nil), e) - - testCases := []struct { - expectedKey []byte - expectedValue []byte - expectedKeyOut string - expectedvalueOut string - }{ - { - expectedKey: kvPairs[2].Key, - expectedValue: kvPairs[2].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", 
- expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - expectedKey: kvPairs[1].Key, - expectedValue: kvPairs[1].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - { - expectedKey: kvPairs[0].Key, - expectedValue: kvPairs[0].Value, - expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n", - expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n", - }, - } - - for _, tc := range testCases { - buf.Reset() - ka := iterator.Key() - require.Equal(t, tc.expectedKeyOut, buf.String()) - - buf.Reset() - va := iterator.Value() - require.Equal(t, tc.expectedvalueOut, buf.String()) - - require.Equal(t, tc.expectedKey, ka) - require.Equal(t, tc.expectedValue, va) - - iterator.Next() - } - - require.False(t, iterator.Valid()) - require.Panics(t, iterator.Next) - require.NoError(t, iterator.Close()) -} - -func TestTraceKVStorePrefix(t *testing.T) { - store := newEmptyTraceKVStore(nil) - pStore := prefix.NewStore(store, []byte("trace_prefix")) - require.IsType(t, prefix.Store{}, pStore) -} - -func TestTraceKVStoreGetStoreType(t *testing.T) { - memDB := dbadapter.Store{DB: dbm.NewMemDB()} - store := newEmptyTraceKVStore(nil) - require.Equal(t, memDB.GetStoreType(), store.GetStoreType()) -} - -func TestTraceKVStoreCacheWrap(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) -} diff --git a/store/transient/store.go b/store/transient/store.go deleted file mode 100644 index d72a72476e..0000000000 --- a/store/transient/store.go +++ /dev/null @@ -1,53 +0,0 @@ -package transient - -import ( - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/dbadapter" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/types" -) - -var ( - _ types.Committer = (*Store)(nil) - _ types.KVStore = (*Store)(nil) -) - -// Store is a wrapper for a MemDB with Committer implementation -type Store struct { - dbadapter.Store -} - -// NewStore constructs new MemDB adapter -func NewStore() *Store { - return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}} -} - -// Commit cleans up Store. -// Implements CommitStore -func (ts *Store) Commit() (id types.CommitID) { - ts.Store = dbadapter.Store{DB: dbm.NewMemDB()} - return -} - -func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {} - -// GetPruning is a no-op as pruning options cannot be directly set on this store. -// They must be set on the root commit multi-store. -func (ts *Store) GetPruning() pruningtypes.PruningOptions { - return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) -} - -// LastCommitID implements CommitStore -func (ts *Store) LastCommitID() types.CommitID { - return types.CommitID{} -} - -func (ts *Store) WorkingHash() []byte { - return []byte{} -} - -// GetStoreType implements Store. 
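For context on the transient store deleted above: its defining contract is that `Commit` swaps in a fresh MemDB, so nothing written during a block survives the block boundary. A minimal, self-contained sketch of that reset-on-commit behavior (a plain Go map stands in for the MemDB; the names here are illustrative, not the SDK API):

```go
package main

import "fmt"

type transientStore struct{ kv map[string][]byte }

func newTransientStore() *transientStore { return &transientStore{kv: map[string][]byte{}} }

func (s *transientStore) Set(k, v []byte) { s.kv[string(k)] = v }
func (s *transientStore) Get(k []byte) []byte { return s.kv[string(k)] }

// Commit mirrors Store.Commit in the hunk above: replace the backing
// storage wholesale and report a zero CommitID, so no state persists.
func (s *transientStore) Commit() { s.kv = map[string][]byte{} }

func main() {
	ts := newTransientStore()
	ts.Set([]byte("hello"), []byte("world"))
	fmt.Printf("before commit: %q\n", ts.Get([]byte("hello"))) // "world"
	ts.Commit()
	fmt.Printf("after commit: %q\n", ts.Get([]byte("hello"))) // ""
}
```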
-func (ts *Store) GetStoreType() types.StoreType { - return types.StoreTypeTransient -} diff --git a/store/transient/store_test.go b/store/transient/store_test.go deleted file mode 100644 index 341ef41cc4..0000000000 --- a/store/transient/store_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package transient_test - -import ( - "bytes" - "testing" - - "github.com/stretchr/testify/require" - - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/transient" -) - -var k, v = []byte("hello"), []byte("world") - -func TestTransientStore(t *testing.T) { - tstore := transient.NewStore() - - require.Nil(t, tstore.Get(k)) - - tstore.Set(k, v) - - require.Equal(t, v, tstore.Get(k)) - - tstore.Commit() - - require.Nil(t, tstore.Get(k)) - - // no-op - tstore.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)) - - emptyCommitID := tstore.LastCommitID() - require.Equal(t, emptyCommitID.Version, int64(0)) - require.True(t, bytes.Equal(emptyCommitID.Hash, nil)) -} diff --git a/store/types/codec.go b/store/types/codec.go deleted file mode 100644 index 3b5203747c..0000000000 --- a/store/types/codec.go +++ /dev/null @@ -1,90 +0,0 @@ -package types - -import ( - "encoding/binary" - fmt "fmt" - - proto "github.com/cosmos/gogoproto/proto" -) - -// Codec defines a interface needed for the store package to marshal data -type Codec interface { - // Marshal returns binary encoding of v. - Marshal(proto.Message) ([]byte, error) - - // MarshalLengthPrefixed returns binary encoding of v with bytes length prefix. - MarshalLengthPrefixed(proto.Message) ([]byte, error) - - // Unmarshal parses the data encoded with Marshal method and stores the result - // in the value pointed to by v. - Unmarshal(bz []byte, ptr proto.Message) error - - // UnmarshalLengthPrefixed parses the data encoded with UnmarshalLengthPrefixed method and stores - // the result in the value pointed to by v. - UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error -} - -// ============= TestCodec ============= - -// TestCodec defines a codec that utilizes Protobuf for both binary and JSON -// encoding. -type TestCodec struct{} - -var _ Codec = &TestCodec{} - -func NewTestCodec() Codec { - return &TestCodec{} -} - -// Marshal implements BinaryMarshaler.Marshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. For interface please use the codec.MarshalInterface -func (pc *TestCodec) Marshal(o proto.Message) ([]byte, error) { - // Size() check can catch the typed nil value. - if o == nil || proto.Size(o) == 0 { - // return empty bytes instead of nil, because nil has special meaning in places like store.Set - return []byte{}, nil - } - return proto.Marshal(o) -} - -// MarshalLengthPrefixed implements BinaryMarshaler.MarshalLengthPrefixed method. -func (pc *TestCodec) MarshalLengthPrefixed(o proto.Message) ([]byte, error) { - bz, err := pc.Marshal(o) - if err != nil { - return nil, err - } - - var sizeBuf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(sizeBuf[:], uint64(len(bz))) - return append(sizeBuf[:n], bz...), nil -} - -// Unmarshal implements BinaryMarshaler.Unmarshal method. -// NOTE: this function must be used with a concrete type which -// implements proto.Message. 
For interface please use the codec.UnmarshalInterface -func (pc *TestCodec) Unmarshal(bz []byte, ptr proto.Message) error { - err := proto.Unmarshal(bz, ptr) - if err != nil { - return err - } - - return nil -} - -// UnmarshalLengthPrefixed implements BinaryMarshaler.UnmarshalLengthPrefixed method. -func (pc *TestCodec) UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error { - size, n := binary.Uvarint(bz) - if n < 0 { - return fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", n) - } - - if size > uint64(len(bz)-n) { - return fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-n) - } else if size < uint64(len(bz)-n) { - return fmt.Errorf("too many bytes to read; want: %v, got: %v", size, len(bz)-n) - } - - bz = bz[n:] - return proto.Unmarshal(bz, ptr) -} diff --git a/store/types/commit_info.go b/store/types/commit_info.go deleted file mode 100644 index e56193332e..0000000000 --- a/store/types/commit_info.go +++ /dev/null @@ -1,62 +0,0 @@ -package types - -import ( - "crypto/sha256" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - - "cosmossdk.io/store/internal/maps" -) - -// GetHash returns the GetHash from the CommitID. -// This is used in CommitInfo.Hash() -// -// When we commit to this in a merkle proof, we create a map of storeInfo.Name -> storeInfo.GetHash() -// and build a merkle proof from that. -// This is then chained with the substore proof, so we prove the root hash from the substore before this -// and need to pass that (unmodified) as the leaf value of the multistore proof. -func (si StoreInfo) GetHash() []byte { - return si.CommitId.Hash -} - -func (ci CommitInfo) toMap() map[string][]byte { - m := make(map[string][]byte, len(ci.StoreInfos)) - for _, storeInfo := range ci.StoreInfos { - m[storeInfo.Name] = storeInfo.GetHash() - } - - return m -} - -// Hash returns the simple merkle root hash of the stores sorted by name. -func (ci CommitInfo) Hash() []byte { - // we need a special case for empty set, as SimpleProofsFromMap requires at least one entry - if len(ci.StoreInfos) == 0 { - emptyHash := sha256.Sum256([]byte{}) - return emptyHash[:] - } - - rootHash, _, _ := maps.ProofsFromMap(ci.toMap()) - - if len(rootHash) == 0 { - emptyHash := sha256.Sum256([]byte{}) - return emptyHash[:] - } - - return rootHash -} - -func (ci CommitInfo) ProofOp(storeName string) cmtprotocrypto.ProofOp { - ret, err := ProofOpFromMap(ci.toMap(), storeName) - if err != nil { - panic(err) - } - return ret -} - -func (ci CommitInfo) CommitID() CommitID { - return CommitID{ - Version: ci.Version, - Hash: ci.Hash(), - } -} diff --git a/store/types/commit_info.pb.go b/store/types/commit_info.pb.go deleted file mode 100644 index 81220a79c2..0000000000 --- a/store/types/commit_info.pb.go +++ /dev/null @@ -1,864 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/v1beta1/commit_info.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" - _ "google.golang.org/protobuf/types/known/timestamppb" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. 
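The strict size check in the `UnmarshalLengthPrefixed` method deleted above rejects both truncated and over-long payloads, not just truncated ones. A runnable sketch of the same uvarint length-prefix framing on raw bytes (the `prefix`/`unprefix` helper names are made up for illustration):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// prefix mirrors MarshalLengthPrefixed above: uvarint(len(bz)) || bz.
func prefix(bz []byte) []byte {
	var size [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(size[:], uint64(len(bz)))
	return append(size[:n], bz...)
}

// unprefix mirrors UnmarshalLengthPrefixed above, including the strict
// check that the declared size matches the remaining bytes exactly.
func unprefix(bz []byte) ([]byte, error) {
	size, n := binary.Uvarint(bz)
	if n <= 0 { // n == 0: buffer too small; n < 0: varint overflow
		return nil, fmt.Errorf("invalid length prefix: %d", n)
	}
	if size != uint64(len(bz)-n) {
		return nil, fmt.Errorf("want %d payload bytes, got %d", size, len(bz)-n)
	}
	return bz[n:], nil
}

func main() {
	enc := prefix([]byte("payload"))
	dec, err := unprefix(enc)
	fmt.Printf("%q %v\n", dec, err) // "payload" <nil>
}
```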
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CommitInfo defines commit information used by the multi-store when committing -// a version/height. -type CommitInfo struct { - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - StoreInfos []StoreInfo `protobuf:"bytes,2,rep,name=store_infos,json=storeInfos,proto3" json:"store_infos"` - Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` -} - -func (m *CommitInfo) Reset() { *m = CommitInfo{} } -func (m *CommitInfo) String() string { return proto.CompactTextString(m) } -func (*CommitInfo) ProtoMessage() {} -func (*CommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{0} -} -func (m *CommitInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitInfo.Merge(m, src) -} -func (m *CommitInfo) XXX_Size() int { - return m.Size() -} -func (m *CommitInfo) XXX_DiscardUnknown() { - xxx_messageInfo_CommitInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitInfo proto.InternalMessageInfo - -func (m *CommitInfo) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *CommitInfo) GetStoreInfos() []StoreInfo { - if m != nil { - return m.StoreInfos - } - return nil -} - -func (m *CommitInfo) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -// StoreInfo defines store-specific commit information. It contains a reference -// between a store name and the commit ID. 
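Stepping back from the generated code: the hand-written `CommitInfo.Hash` deleted further up gathers each store's commit hash into a `name -> hash` map and merkle-izes it via `maps.ProofsFromMap`. The sketch below is only a stand-in for that shape — a deterministic fold over sorted store names — not the actual CometBFT simple-merkle construction, which lives in `store/internal/maps`:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// commitHash illustrates the map + deterministic-ordering idea behind
// CommitInfo.Hash; the real code builds a simple merkle tree instead of
// folding into a single sha256.
func commitHash(storeHashes map[string][]byte) []byte {
	names := make([]string, 0, len(storeHashes))
	for name := range storeHashes {
		names = append(names, name)
	}
	sort.Strings(names) // stores are hashed in sorted-name order

	h := sha256.New()
	for _, name := range names {
		h.Write([]byte(name))
		h.Write(storeHashes[name])
	}
	return h.Sum(nil)
}

func main() {
	a := sha256.Sum256([]byte("store-a-state"))
	b := sha256.Sum256([]byte("store-b-state"))
	root := commitHash(map[string][]byte{"bank": a[:], "staking": b[:]})
	fmt.Printf("%x\n", root)
}
```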
-type StoreInfo struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CommitId CommitID `protobuf:"bytes,2,opt,name=commit_id,json=commitId,proto3" json:"commit_id"` -} - -func (m *StoreInfo) Reset() { *m = StoreInfo{} } -func (m *StoreInfo) String() string { return proto.CompactTextString(m) } -func (*StoreInfo) ProtoMessage() {} -func (*StoreInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{1} -} -func (m *StoreInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StoreInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StoreInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StoreInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_StoreInfo.Merge(m, src) -} -func (m *StoreInfo) XXX_Size() int { - return m.Size() -} -func (m *StoreInfo) XXX_DiscardUnknown() { - xxx_messageInfo_StoreInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_StoreInfo proto.InternalMessageInfo - -func (m *StoreInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *StoreInfo) GetCommitId() CommitID { - if m != nil { - return m.CommitId - } - return CommitID{} -} - -// CommitID defines the commitment information when a specific store is -// committed. -type CommitID struct { - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` -} - -func (m *CommitID) Reset() { *m = CommitID{} } -func (*CommitID) ProtoMessage() {} -func (*CommitID) Descriptor() ([]byte, []int) { - return fileDescriptor_5f8c656cdef8c524, []int{2} -} -func (m *CommitID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommitID) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitID.Merge(m, src) -} -func (m *CommitID) XXX_Size() int { - return m.Size() -} -func (m *CommitID) XXX_DiscardUnknown() { - xxx_messageInfo_CommitID.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitID proto.InternalMessageInfo - -func (m *CommitID) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *CommitID) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func init() { - proto.RegisterType((*CommitInfo)(nil), "cosmos.store.v1beta1.CommitInfo") - proto.RegisterType((*StoreInfo)(nil), "cosmos.store.v1beta1.StoreInfo") - proto.RegisterType((*CommitID)(nil), "cosmos.store.v1beta1.CommitID") -} - -func init() { - proto.RegisterFile("cosmos/store/v1beta1/commit_info.proto", fileDescriptor_5f8c656cdef8c524) -} - -var fileDescriptor_5f8c656cdef8c524 = []byte{ - // 336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xf2, 0x50, - 0x14, 0xc7, 0x7b, 0xa1, 0xf9, 0x3e, 0x7a, 0x70, 0xba, 0x61, 0x68, 0x18, 0x6e, 0x09, 0x83, 0x61, - 0xba, 0x0d, 0xb8, 0x39, 0x98, 0x58, 0x8d, 0x09, 0x6b, 0x75, 0x72, 0x31, 0x2d, 0x5c, 0x4a, 0xa3, - 0xed, 0x21, 0xdc, 0x2b, 0x89, 0x6f, 0xc1, 0xe8, 0xe8, 0x33, 0xf8, 0x14, 0x8c, 0x8c, 0x4e, 0x6a, - 
0xe0, 0x45, 0x4c, 0x4f, 0x5b, 0x5c, 0x88, 0xdb, 0x39, 0xed, 0xef, 0x9c, 0xff, 0xaf, 0xa7, 0x70, - 0x3a, 0x41, 0x9d, 0xa1, 0xf6, 0xb5, 0xc1, 0xa5, 0xf2, 0x57, 0xc3, 0x58, 0x99, 0x68, 0xe8, 0x4f, - 0x30, 0xcb, 0x52, 0xf3, 0x90, 0xe6, 0x33, 0x94, 0x8b, 0x25, 0x1a, 0xe4, 0x9d, 0x92, 0x93, 0xc4, - 0xc9, 0x8a, 0xeb, 0x76, 0x12, 0x4c, 0x90, 0x00, 0xbf, 0xa8, 0x4a, 0xb6, 0xeb, 0x25, 0x88, 0xc9, - 0x93, 0xf2, 0xa9, 0x8b, 0x9f, 0x67, 0xbe, 0x49, 0x33, 0xa5, 0x4d, 0x94, 0x2d, 0x4a, 0xa0, 0xff, - 0xce, 0x00, 0xae, 0x28, 0x62, 0x9c, 0xcf, 0x90, 0xbb, 0xf0, 0x7f, 0xa5, 0x96, 0x3a, 0xc5, 0xdc, - 0x65, 0x3d, 0x36, 0x68, 0x86, 0x75, 0xcb, 0x6f, 0xa0, 0x4d, 0x81, 0x64, 0xa2, 0xdd, 0x46, 0xaf, - 0x39, 0x68, 0x8f, 0x3c, 0x79, 0xcc, 0x45, 0xde, 0x16, 0x5d, 0xb1, 0x2f, 0xb0, 0x37, 0x9f, 0x9e, - 0x15, 0x82, 0xae, 0x1f, 0x68, 0x1e, 0x80, 0x73, 0x70, 0x70, 0x9b, 0x3d, 0x36, 0x68, 0x8f, 0xba, - 0xb2, 0xb4, 0x94, 0xb5, 0xa5, 0xbc, 0xab, 0x89, 0xa0, 0x55, 0x2c, 0x58, 0x7f, 0x79, 0x2c, 0xfc, - 0x1d, 0xeb, 0xc7, 0xe0, 0x1c, 0x22, 0x38, 0x07, 0x3b, 0x8f, 0x32, 0x45, 0xbe, 0x4e, 0x48, 0x35, - 0xbf, 0x04, 0xa7, 0xbe, 0xdb, 0xd4, 0x6d, 0x50, 0x88, 0x38, 0xae, 0x5a, 0x7d, 0xfb, 0x75, 0x65, - 0xda, 0x2a, 0xc7, 0xc6, 0xd3, 0xfe, 0x05, 0xb4, 0xea, 0x77, 0x7f, 0x5c, 0x85, 0x83, 0x3d, 0x8f, - 0xf4, 0x9c, 0x32, 0x4e, 0x42, 0xaa, 0xcf, 0xed, 0xd7, 0x37, 0xcf, 0x0a, 0x46, 0x9b, 0x9d, 0x60, - 0xdb, 0x9d, 0x60, 0xdf, 0x3b, 0xc1, 0xd6, 0x7b, 0x61, 0x6d, 0xf7, 0xc2, 0xfa, 0xd8, 0x0b, 0xeb, - 0xde, 0x2d, 0x45, 0xf4, 0xf4, 0x51, 0xa6, 0x58, 0xfd, 0x6d, 0xf3, 0xb2, 0x50, 0x3a, 0xfe, 0x47, - 0x07, 0x38, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb7, 0x0d, 0x59, 0x0a, 0x02, 0x00, 0x00, -} - -func (m *CommitInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintCommitInfo(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x1a - if len(m.StoreInfos) > 0 { - for iNdEx := len(m.StoreInfos) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.StoreInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommitInfo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Version != 0 { - i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StoreInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StoreInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StoreInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.CommitId.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCommitInfo(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], 
m.Name) - i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CommitID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommitID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommitID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x12 - } - if m.Version != 0 { - i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintCommitInfo(dAtA []byte, offset int, v uint64) int { - offset -= sovCommitInfo(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CommitInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != 0 { - n += 1 + sovCommitInfo(uint64(m.Version)) - } - if len(m.StoreInfos) > 0 { - for _, e := range m.StoreInfos { - l = e.Size() - n += 1 + l + sovCommitInfo(uint64(l)) - } - } - l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovCommitInfo(uint64(l)) - return n -} - -func (m *StoreInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovCommitInfo(uint64(l)) - } - l = m.CommitId.Size() - n += 1 + l + sovCommitInfo(uint64(l)) - return n -} - -func (m *CommitID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != 0 { - n += 1 + sovCommitInfo(uint64(m.Version)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovCommitInfo(uint64(l)) - } - return n -} - -func sovCommitInfo(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCommitInfo(x uint64) (n int) { - return sovCommitInfo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CommitInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoreInfos", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoreInfos = append(m.StoreInfos, StoreInfo{}) - if err := m.StoreInfos[len(m.StoreInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StoreInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StoreInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StoreInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CommitId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CommitId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommitID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCommitInfo - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCommitInfo - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
- if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCommitInfo(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCommitInfo - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCommitInfo(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCommitInfo - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCommitInfo - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCommitInfo - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCommitInfo - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCommitInfo = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCommitInfo = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCommitInfo = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/types/context.go b/store/types/context.go deleted file mode 100644 index 999539e16c..0000000000 --- a/store/types/context.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -import "cosmossdk.io/log" - -// Context is an interface used by an App to pass context information -// needed to process store streaming requests. -type Context interface { - BlockHeight() int64 - Logger() log.Logger - StreamingManager() StreamingManager -} diff --git a/store/types/errors.go b/store/types/errors.go deleted file mode 100644 index db86a3cc65..0000000000 --- a/store/types/errors.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - "cosmossdk.io/errors" -) - -const StoreCodespace = "store" - -var ( - // ErrInvalidProof is returned when a proof is invalid - ErrInvalidProof = errors.Register(StoreCodespace, 2, "invalid proof") - // ErrTxDecode is returned if we cannot parse a transaction - ErrTxDecode = errors.Register(StoreCodespace, 3, "tx parse error") - - // ErrUnknownRequest to doc - ErrUnknownRequest = errors.Register(StoreCodespace, 4, "unknown request") - - // ErrLogic defines an internal logic error, e.g. an invariant or assertion - // that is violated. It is a programmer error, not a user-facing error. - ErrLogic = errors.Register(StoreCodespace, 5, "internal logic error") - - // ErrConflict defines a conflict error, e.g. when two goroutines try to access - // the same resource and one of them fails. 
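The errors.go file being deleted here follows the standard `cosmossdk.io/errors` registration pattern: each codespace registers numbered errors once, and call sites wrap them with context while keeping them matchable. A small usage sketch (the `"demo"` codespace and code `2` are invented for illustration; in the hunk above the `store` codespace registers codes 2 through 7):

```go
package main

import (
	"fmt"

	errorsmod "cosmossdk.io/errors"
)

// Register panics on duplicate (codespace, code) pairs, so each error is
// declared exactly once at package scope.
var ErrExample = errorsmod.Register("demo", 2, "example failure")

func load(key string) error {
	// Wrapf attaches call-site context while keeping the registered
	// error matchable via errors.IsOf.
	return errorsmod.Wrapf(ErrExample, "loading %q", key)
}

func main() {
	err := load("state")
	fmt.Println(err)                             // loading "state": example failure
	fmt.Println(errorsmod.IsOf(err, ErrExample)) // true
}
```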
- ErrConflict = errors.Register(StoreCodespace, 6, "conflict") - // ErrInvalidRequest defines an ABCI typed error where the request contains - // invalid data. - ErrInvalidRequest = errors.Register(StoreCodespace, 7, "invalid request") -) diff --git a/store/types/gas.go b/store/types/gas.go deleted file mode 100644 index 46e7e4ebd0..0000000000 --- a/store/types/gas.go +++ /dev/null @@ -1,255 +0,0 @@ -package types - -import ( - "fmt" - "math" -) - -// Gas consumption descriptors. -const ( - GasIterNextCostFlatDesc = "IterNextFlat" - GasValuePerByteDesc = "ValuePerByte" - GasWritePerByteDesc = "WritePerByte" - GasReadPerByteDesc = "ReadPerByte" - GasWriteCostFlatDesc = "WriteFlat" - GasReadCostFlatDesc = "ReadFlat" - GasHasDesc = "Has" - GasDeleteDesc = "Delete" -) - -// Gas measured by the SDK -type Gas = uint64 - -// ErrorNegativeGasConsumed defines an error thrown when the amount of gas refunded results in a -// negative gas consumed amount. -type ErrorNegativeGasConsumed struct { - Descriptor string -} - -// ErrorOutOfGas defines an error thrown when an action results in out of gas. -type ErrorOutOfGas struct { - Descriptor string -} - -// ErrorGasOverflow defines an error thrown when an action results gas consumption -// unsigned integer overflow. -type ErrorGasOverflow struct { - Descriptor string -} - -// GasMeter interface to track gas consumption -type GasMeter interface { - GasConsumed() Gas - GasConsumedToLimit() Gas - GasRemaining() Gas - Limit() Gas - ConsumeGas(amount Gas, descriptor string) - RefundGas(amount Gas, descriptor string) - IsPastLimit() bool - IsOutOfGas() bool - String() string -} - -type basicGasMeter struct { - limit Gas - consumed Gas -} - -// NewGasMeter returns a reference to a new basicGasMeter. -func NewGasMeter(limit Gas) GasMeter { - return &basicGasMeter{ - limit: limit, - consumed: 0, - } -} - -// GasConsumed returns the gas consumed from the GasMeter. -func (g *basicGasMeter) GasConsumed() Gas { - return g.consumed -} - -// GasRemaining returns the gas left in the GasMeter. -func (g *basicGasMeter) GasRemaining() Gas { - if g.IsPastLimit() { - return 0 - } - return g.limit - g.consumed -} - -// Limit returns the gas limit of the GasMeter. -func (g *basicGasMeter) Limit() Gas { - return g.limit -} - -// GasConsumedToLimit returns the gas limit if gas consumed is past the limit, -// otherwise it returns the consumed gas. -// -// NOTE: This behavior is only called when recovering from panic when -// BlockGasMeter consumes gas past the limit. -func (g *basicGasMeter) GasConsumedToLimit() Gas { - if g.IsPastLimit() { - return g.limit - } - return g.consumed -} - -// addUint64Overflow performs the addition operation on two uint64 integers and -// returns a boolean on whether or not the result overflows. -func addUint64Overflow(a, b uint64) (uint64, bool) { - if math.MaxUint64-a < b { - return 0, true - } - - return a + b, false -} - -// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit or out of gas. -func (g *basicGasMeter) ConsumeGas(amount Gas, descriptor string) { - var overflow bool - g.consumed, overflow = addUint64Overflow(g.consumed, amount) - if overflow { - g.consumed = math.MaxUint64 - panic(ErrorGasOverflow{descriptor}) - } - - if g.consumed > g.limit { - panic(ErrorOutOfGas{descriptor}) - } -} - -// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the -// gas consumed, the function will panic. 
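The basicGasMeter deleted in this hunk signals failure by panicking, which callers (e.g. the tx runner) recover from. A minimal sketch of that contract — saturate on overflow, panic once the limit is crossed — with illustrative names, not the SDK API:

```go
package main

import (
	"fmt"
	"math"
)

type outOfGas struct{ descriptor string }

// gasMeter sketches basicGasMeter above: consumed saturates at MaxUint64
// on overflow, and crossing the limit panics so the caller can recover.
type gasMeter struct{ limit, consumed uint64 }

func (g *gasMeter) consume(amount uint64, desc string) {
	if math.MaxUint64-g.consumed < amount { // addUint64Overflow check
		g.consumed = math.MaxUint64
		panic(outOfGas{desc}) // the real meter panics with ErrorGasOverflow here
	}
	g.consumed += amount
	if g.consumed > g.limit {
		panic(outOfGas{desc}) // ErrorOutOfGas in the deleted code
	}
}

func main() {
	g := &gasMeter{limit: 10}
	defer func() {
		if r := recover(); r != nil {
			fmt.Printf("recovered: %+v, consumed=%d\n", r, g.consumed)
		}
	}()
	g.consume(6, "read")
	g.consume(6, "write") // crosses the 10-gas limit and panics
}
```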
-// -// Use case: This functionality enables refunding gas to the transaction or block gas pools so that -// EVM-compatible chains can fully support the go-ethereum StateDb interface. -// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference. -func (g *basicGasMeter) RefundGas(amount Gas, descriptor string) { - if g.consumed < amount { - panic(ErrorNegativeGasConsumed{Descriptor: descriptor}) - } - - g.consumed -= amount -} - -// IsPastLimit returns true if gas consumed is past limit, otherwise it returns false. -func (g *basicGasMeter) IsPastLimit() bool { - return g.consumed > g.limit -} - -// IsOutOfGas returns true if gas consumed is greater than or equal to gas limit, otherwise it returns false. -func (g *basicGasMeter) IsOutOfGas() bool { - return g.consumed >= g.limit -} - -// String returns the BasicGasMeter's gas limit and gas consumed. -func (g *basicGasMeter) String() string { - return fmt.Sprintf("BasicGasMeter:\n limit: %d\n consumed: %d", g.limit, g.consumed) -} - -type infiniteGasMeter struct { - consumed Gas -} - -// NewInfiniteGasMeter returns a new gas meter without a limit. -func NewInfiniteGasMeter() GasMeter { - return &infiniteGasMeter{ - consumed: 0, - } -} - -// GasConsumed returns the gas consumed from the GasMeter. -func (g *infiniteGasMeter) GasConsumed() Gas { - return g.consumed -} - -// GasConsumedToLimit returns the gas consumed from the GasMeter since the gas is not confined to a limit. -// NOTE: This behavior is only called when recovering from panic when BlockGasMeter consumes gas past the limit. -func (g *infiniteGasMeter) GasConsumedToLimit() Gas { - return g.consumed -} - -// GasRemaining returns MaxUint64 since limit is not confined in infiniteGasMeter. -func (g *infiniteGasMeter) GasRemaining() Gas { - return math.MaxUint64 -} - -// Limit returns MaxUint64 since limit is not confined in infiniteGasMeter. -func (g *infiniteGasMeter) Limit() Gas { - return math.MaxUint64 -} - -// ConsumeGas adds the given amount of gas to the gas consumed and panics if it overflows the limit. -func (g *infiniteGasMeter) ConsumeGas(amount Gas, descriptor string) { - var overflow bool - // TODO: Should we set the consumed field after overflow checking? - g.consumed, overflow = addUint64Overflow(g.consumed, amount) - if overflow { - panic(ErrorGasOverflow{descriptor}) - } -} - -// RefundGas will deduct the given amount from the gas consumed. If the amount is greater than the -// gas consumed, the function will panic. -// -// Use case: This functionality enables refunding gas to the transaction or block gas pools so that -// EVM-compatible chains can fully support the go-ethereum StateDb interface. -// See https://github.com/cosmos/cosmos-sdk/pull/9403 for reference. -func (g *infiniteGasMeter) RefundGas(amount Gas, descriptor string) { - if g.consumed < amount { - panic(ErrorNegativeGasConsumed{Descriptor: descriptor}) - } - - g.consumed -= amount -} - -// IsPastLimit returns false since the gas limit is not confined. -func (g *infiniteGasMeter) IsPastLimit() bool { - return false -} - -// IsOutOfGas returns false since the gas limit is not confined. -func (g *infiniteGasMeter) IsOutOfGas() bool { - return false -} - -// String returns the InfiniteGasMeter's gas consumed. 
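The GasConfig tables that follow only hold per-operation prices; the actual charging happens in the gas-wrapped KVStore (store/gaskv, outside this diff). The usual shape of a charge is a flat cost plus a per-byte cost over the key and value — a sketch of that arithmetic, with the exact charging sites stated here as an assumption:

```go
package main

import "fmt"

// gasConfig mirrors two fields of the GasConfig struct below; readCost
// shows the typical flat + per-byte pricing a metered read incurs.
type gasConfig struct {
	readCostFlat, readCostPerByte uint64
}

func readCost(cfg gasConfig, key, value []byte) uint64 {
	return cfg.readCostFlat + cfg.readCostPerByte*uint64(len(key)+len(value))
}

func main() {
	kv := gasConfig{readCostFlat: 1000, readCostPerByte: 3} // KVGasConfig values
	fmt.Println(readCost(kv, []byte("key"), []byte("value"))) // 1000 + 3*8 = 1024
}
```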
-func (g *infiniteGasMeter) String() string { - return fmt.Sprintf("InfiniteGasMeter:\n consumed: %d", g.consumed) -} - -// GasConfig defines gas cost for each operation on KVStores -type GasConfig struct { - HasCost Gas - DeleteCost Gas - ReadCostFlat Gas - ReadCostPerByte Gas - WriteCostFlat Gas - WriteCostPerByte Gas - IterNextCostFlat Gas -} - -// KVGasConfig returns a default gas config for KVStores. -func KVGasConfig() GasConfig { - return GasConfig{ - HasCost: 1000, - DeleteCost: 1000, - ReadCostFlat: 1000, - ReadCostPerByte: 3, - WriteCostFlat: 2000, - WriteCostPerByte: 30, - IterNextCostFlat: 30, - } -} - -// TransientGasConfig returns a default gas config for TransientStores. -func TransientGasConfig() GasConfig { - return GasConfig{ - HasCost: 100, - DeleteCost: 100, - ReadCostFlat: 100, - ReadCostPerByte: 0, - WriteCostFlat: 200, - WriteCostPerByte: 3, - IterNextCostFlat: 3, - } -} diff --git a/store/types/gas_test.go b/store/types/gas_test.go deleted file mode 100644 index f4b5a6abe5..0000000000 --- a/store/types/gas_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package types - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestInfiniteGasMeter(t *testing.T) { - t.Parallel() - meter := NewInfiniteGasMeter() - require.Equal(t, uint64(math.MaxUint64), meter.Limit()) - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(0), meter.GasConsumed()) - require.Equal(t, uint64(0), meter.GasConsumedToLimit()) - meter.ConsumeGas(10, "consume 10") - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(10), meter.GasConsumed()) - require.Equal(t, uint64(10), meter.GasConsumedToLimit()) - meter.RefundGas(1, "refund 1") - require.Equal(t, uint64(math.MaxUint64), meter.GasRemaining()) - require.Equal(t, uint64(9), meter.GasConsumed()) - require.False(t, meter.IsPastLimit()) - require.False(t, meter.IsOutOfGas()) - meter.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") - require.Panics(t, func() { meter.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) - require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) -} - -func TestGasMeter(t *testing.T) { - t.Parallel() - cases := []struct { - limit Gas - usage []Gas - }{ - {10, []Gas{1, 2, 3, 4}}, - {1000, []Gas{40, 30, 20, 10, 900}}, - {100000, []Gas{99999, 1}}, - {100000000, []Gas{50000000, 40000000, 10000000}}, - {65535, []Gas{32768, 32767}}, - {65536, []Gas{32768, 32767, 1}}, - } - - for tcnum, tc := range cases { - meter := NewGasMeter(tc.limit) - used := uint64(0) - - for unum, usage := range tc.usage { - usage := usage - used += usage - require.NotPanics(t, func() { meter.ConsumeGas(usage, "") }, "Not exceeded limit but panicked. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, used, meter.GasConsumed(), "Gas consumption not match. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, tc.limit-used, meter.GasRemaining(), "Gas left not match. tc #%d, usage #%d", tcnum, unum) - require.Equal(t, used, meter.GasConsumedToLimit(), "Gas consumption (to limit) not match. tc #%d, usage #%d", tcnum, unum) - require.False(t, meter.IsPastLimit(), "Not exceeded limit but got IsPastLimit() true") - if unum < len(tc.usage)-1 { - require.False(t, meter.IsOutOfGas(), "Not yet at limit but got IsOutOfGas() true") - } else { - require.True(t, meter.IsOutOfGas(), "At limit but got IsOutOfGas() false") - } - } - - require.Panics(t, func() { meter.ConsumeGas(1, "") }, "Exceeded but not panicked. 
tc #%d", tcnum) - require.Equal(t, meter.GasConsumedToLimit(), meter.Limit(), "Gas consumption (to limit) not match limit") - require.Equal(t, meter.GasConsumed(), meter.Limit()+1, "Gas consumption not match limit+1") - require.Equal(t, uint64(0), meter.GasRemaining()) - - require.NotPanics(t, func() { meter.RefundGas(1, "refund 1") }) - require.Equal(t, meter.GasConsumed(), meter.Limit(), "Gas consumption not match with limit") - require.Equal(t, uint64(0), meter.GasRemaining()) - require.Panics(t, func() { meter.RefundGas(meter.GasConsumed()+1, "refund greater than consumed") }) - - require.NotPanics(t, func() { meter.RefundGas(meter.GasConsumed(), "refund consumed gas") }) - require.Equal(t, meter.Limit(), meter.GasRemaining()) - - meter2 := NewGasMeter(math.MaxUint64) - require.Equal(t, uint64(math.MaxUint64), meter2.GasRemaining()) - meter2.ConsumeGas(Gas(math.MaxUint64/2), "consume half max uint64") - require.Equal(t, Gas(math.MaxUint64-(math.MaxUint64/2)), meter2.GasRemaining()) - require.Panics(t, func() { meter2.ConsumeGas(Gas(math.MaxUint64/2)+2, "panic") }) - } -} - -func TestAddUint64Overflow(t *testing.T) { - t.Parallel() - testCases := []struct { - a, b uint64 - result uint64 - overflow bool - }{ - {0, 0, 0, false}, - {100, 100, 200, false}, - {math.MaxUint64 / 2, math.MaxUint64/2 + 1, math.MaxUint64, false}, - {math.MaxUint64 / 2, math.MaxUint64/2 + 2, 0, true}, - } - - for i, tc := range testCases { - res, overflow := addUint64Overflow(tc.a, tc.b) - require.Equal( - t, tc.overflow, overflow, - "invalid overflow result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, - ) - require.Equal( - t, tc.result, res, - "invalid uint64 result; tc: #%d, a: %d, b: %d", i, tc.a, tc.b, - ) - } -} - -func TestTransientGasConfig(t *testing.T) { - t.Parallel() - config := TransientGasConfig() - require.Equal(t, config, GasConfig{ - HasCost: 100, - DeleteCost: 100, - ReadCostFlat: 100, - ReadCostPerByte: 0, - WriteCostFlat: 200, - WriteCostPerByte: 3, - IterNextCostFlat: 3, - }) -} diff --git a/store/types/iterator.go b/store/types/iterator.go deleted file mode 100644 index a328e87a68..0000000000 --- a/store/types/iterator.go +++ /dev/null @@ -1,60 +0,0 @@ -package types - -import ( - "fmt" -) - -// KVStorePrefixIteratorPaginated returns iterator over items in the selected page. -// Items iterated and skipped in ascending order. -func KVStorePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { - pi := &PaginatedIterator{ - Iterator: KVStorePrefixIterator(kvs, prefix), - page: page, - limit: limit, - } - pi.skip() - return pi -} - -// KVStoreReversePrefixIteratorPaginated returns iterator over items in the selected page. -// Items iterated and skipped in descending order. -func KVStoreReversePrefixIteratorPaginated(kvs KVStore, prefix []byte, page, limit uint) Iterator { - pi := &PaginatedIterator{ - Iterator: KVStoreReversePrefixIterator(kvs, prefix), - page: page, - limit: limit, - } - pi.skip() - return pi -} - -// PaginatedIterator is a wrapper around Iterator that iterates over values starting for given page and limit. -type PaginatedIterator struct { - Iterator - - page, limit uint // provided during initialization - iterated uint // incremented in a call to Next -} - -func (pi *PaginatedIterator) skip() { - for i := (pi.page - 1) * pi.limit; i > 0 && pi.Iterator.Valid(); i-- { - pi.Iterator.Next() - } -} - -// Next will panic after limit is reached. 
-func (pi *PaginatedIterator) Next() { - if !pi.Valid() { - panic(fmt.Sprintf("PaginatedIterator reached limit %d", pi.limit)) - } - pi.Iterator.Next() - pi.iterated++ -} - -// Valid if below limit and underlying iterator is valid. -func (pi *PaginatedIterator) Valid() bool { - if pi.iterated >= pi.limit { - return false - } - return pi.Iterator.Valid() -} diff --git a/store/types/iterator_test.go b/store/types/iterator_test.go deleted file mode 100644 index a804b092c8..0000000000 --- a/store/types/iterator_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package types_test - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/log" - "cosmossdk.io/store/iavl" - "cosmossdk.io/store/metrics" - "cosmossdk.io/store/types" -) - -func newMemTestKVStore(t *testing.T) types.KVStore { - t.Helper() - db := dbm.NewMemDB() - store, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics()) - require.NoError(t, err) - return store -} - -func TestPaginatedIterator(t *testing.T) { - kvs := newMemTestKVStore(t) - total := 10 - lth := total - 1 - asc := make([][]byte, total) - desc := make([][]byte, total) - // store returns values in lexicographic order (or reverse lex order) - for i := 0; i < total; i++ { - key := []byte{byte(i)} - kvs.Set(key, key) - asc[i] = key - desc[lth-i] = key - } - type testCase struct { - desc string - page, limit uint - result [][]byte - reverse bool - } - for _, tc := range []testCase{ - { - desc: "FirstChunk", - page: 1, - limit: 4, - result: asc[:4], - }, - { - desc: "SecondChunk", - page: 2, - limit: 4, - result: asc[4:8], - }, - { - desc: "ThirdChunkHalf", - page: 3, - limit: 4, - result: asc[8:], - }, - { - desc: "OverLimit", - page: 10, - limit: 10, - result: [][]byte{}, - }, - { - desc: "ZeroLimit", - page: 1, - result: [][]byte{}, - }, - { - desc: "ReverseFirstChunk", - page: 1, - limit: 6, - result: desc[:6], - reverse: true, - }, - { - desc: "ReverseSecondChunk", - page: 2, - limit: 6, - result: desc[6:], - reverse: true, - }, - } { - tc := tc - t.Run(tc.desc, func(t *testing.T) { - var iter types.Iterator - if tc.reverse { - iter = types.KVStoreReversePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) - } else { - iter = types.KVStorePrefixIteratorPaginated(kvs, nil, tc.page, tc.limit) - } - defer iter.Close() - - result := [][]byte{} - for ; iter.Valid(); iter.Next() { - result = append(result, iter.Key()) - } - - require.Equal(t, tc.result, result) - require.False(t, iter.Valid()) - }) - } -} - -func TestPaginatedIteratorPanicIfInvalid(t *testing.T) { - kvs := newMemTestKVStore(t) - - iter := types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 1) - defer iter.Close() - require.False(t, iter.Valid()) - require.Panics(t, func() { iter.Next() }) // "iterator is empty" - - kvs.Set([]byte{1}, []byte{}) - - iter = types.KVStorePrefixIteratorPaginated(kvs, nil, 1, 0) - defer iter.Close() - require.False(t, iter.Valid()) - require.Panics(t, func() { iter.Next() }) // "not empty but limit is zero" -} diff --git a/store/types/listening.go b/store/types/listening.go deleted file mode 100644 index 75828793ff..0000000000 --- a/store/types/listening.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -// MemoryListener listens to the state writes and accumulate the records in memory. -type MemoryListener struct { - stateCache []*StoreKVPair -} - -// NewMemoryListener creates a listener that accumulate the state writes in memory. 
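The MemoryListener being removed here is an accumulate-then-drain buffer: every state write appends a record, and `PopStateCache` hands the batch off while resetting the cache for the next block. A self-contained sketch of that pattern (types simplified; not the SDK's StoreKVPair):

```go
package main

import "fmt"

type kvPair struct {
	key, value []byte
	delete     bool
}

// memoryListener sketches the listener above: OnWrite appends, and
// popStateCache returns the accumulated batch while clearing the buffer.
type memoryListener struct{ cache []kvPair }

func (l *memoryListener) onWrite(key, value []byte, del bool) {
	l.cache = append(l.cache, kvPair{key: key, value: value, delete: del})
}

func (l *memoryListener) popStateCache() []kvPair {
	res := l.cache
	l.cache = nil
	return res
}

func main() {
	l := &memoryListener{}
	l.onWrite([]byte("k1"), []byte("v1"), false)
	l.onWrite([]byte("k2"), nil, true) // a delete is recorded, not applied
	batch := l.popStateCache()
	fmt.Println(len(batch), len(l.cache)) // 2 0
}
```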
-func NewMemoryListener() *MemoryListener { - return &MemoryListener{} -} - -// OnWrite implements MemoryListener interface -func (fl *MemoryListener) OnWrite(storeKey StoreKey, key, value []byte, delete bool) { - fl.stateCache = append(fl.stateCache, &StoreKVPair{ - StoreKey: storeKey.Name(), - Delete: delete, - Key: key, - Value: value, - }) -} - -// PopStateCache returns the current state caches and set to nil -func (fl *MemoryListener) PopStateCache() []*StoreKVPair { - res := fl.stateCache - fl.stateCache = nil - return res -} diff --git a/store/types/listening.pb.go b/store/types/listening.pb.go deleted file mode 100644 index 1821ca474a..0000000000 --- a/store/types/listening.pb.go +++ /dev/null @@ -1,785 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/v1beta1/listening.proto - -package types - -import ( - fmt "fmt" - v1 "github.com/cometbft/cometbft/api/cometbft/abci/v1" - _ "github.com/cosmos/cosmos-proto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and -// Deletes) It optionally includes the StoreKey for the originating KVStore and -// a Boolean flag to distinguish between Sets and Deletes -type StoreKVPair struct { - StoreKey string `protobuf:"bytes,1,opt,name=store_key,json=storeKey,proto3" json:"store_key,omitempty"` - Delete bool `protobuf:"varint,2,opt,name=delete,proto3" json:"delete,omitempty"` - Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *StoreKVPair) Reset() { *m = StoreKVPair{} } -func (m *StoreKVPair) String() string { return proto.CompactTextString(m) } -func (*StoreKVPair) ProtoMessage() {} -func (*StoreKVPair) Descriptor() ([]byte, []int) { - return fileDescriptor_b6caeb9d7b7c7c10, []int{0} -} -func (m *StoreKVPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StoreKVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StoreKVPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StoreKVPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_StoreKVPair.Merge(m, src) -} -func (m *StoreKVPair) XXX_Size() int { - return m.Size() -} -func (m *StoreKVPair) XXX_DiscardUnknown() { - xxx_messageInfo_StoreKVPair.DiscardUnknown(m) -} - -var xxx_messageInfo_StoreKVPair proto.InternalMessageInfo - -func (m *StoreKVPair) GetStoreKey() string { - if m != nil { - return m.StoreKey - } - return "" -} - -func (m *StoreKVPair) GetDelete() bool { - if m != nil { - return m.Delete - } - return false -} - -func (m *StoreKVPair) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *StoreKVPair) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -// BlockMetadata contains all the abci event 
data of a block -// the file streamer dump them into files together with the state changes. -type BlockMetadata struct { - ResponseCommit *v1.CommitResponse `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` - RequestFinalizeBlock *v1.FinalizeBlockRequest `protobuf:"bytes,7,opt,name=request_finalize_block,json=requestFinalizeBlock,proto3" json:"request_finalize_block,omitempty"` - ResponseFinalizeBlock *v1.FinalizeBlockResponse `protobuf:"bytes,8,opt,name=response_finalize_block,json=responseFinalizeBlock,proto3" json:"response_finalize_block,omitempty"` -} - -func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } -func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } -func (*BlockMetadata) ProtoMessage() {} -func (*BlockMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_b6caeb9d7b7c7c10, []int{1} -} -func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockMetadata.Merge(m, src) -} -func (m *BlockMetadata) XXX_Size() int { - return m.Size() -} -func (m *BlockMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_BlockMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo - -func (m *BlockMetadata) GetResponseCommit() *v1.CommitResponse { - if m != nil { - return m.ResponseCommit - } - return nil -} - -func (m *BlockMetadata) GetRequestFinalizeBlock() *v1.FinalizeBlockRequest { - if m != nil { - return m.RequestFinalizeBlock - } - return nil -} - -func (m *BlockMetadata) GetResponseFinalizeBlock() *v1.FinalizeBlockResponse { - if m != nil { - return m.ResponseFinalizeBlock - } - return nil -} - -func init() { - proto.RegisterType((*StoreKVPair)(nil), "cosmos.store.v1beta1.StoreKVPair") - proto.RegisterType((*BlockMetadata)(nil), "cosmos.store.v1beta1.BlockMetadata") -} - -func init() { - proto.RegisterFile("cosmos/store/v1beta1/listening.proto", fileDescriptor_b6caeb9d7b7c7c10) -} - -var fileDescriptor_b6caeb9d7b7c7c10 = []byte{ - // 416 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0xeb, 0xd6, 0x2d, 0x9e, 0x07, 0x2c, 0x32, 0x65, 0x84, 0x81, 0xa2, 0x68, 0x42, 0xd0, - 0xcb, 0x1c, 0xba, 0x71, 0xe2, 0x38, 0x24, 0x24, 0x32, 0x21, 0xa1, 0x20, 0x71, 0x40, 0x48, 0x91, - 0x93, 0xbe, 0x21, 0xab, 0x69, 0x5c, 0x62, 0x2f, 0x52, 0xb9, 0xf0, 0x15, 0xf8, 0x30, 0x48, 0x7c, - 0x05, 0x8e, 0x13, 0x27, 0x8e, 0xa8, 0xfd, 0x22, 0x28, 0xb6, 0x8b, 0x34, 0x38, 0xec, 0x96, 0xff, - 0x7b, 0xff, 0xff, 0xcf, 0xcf, 0xf1, 0xa3, 0x8f, 0x4a, 0xa5, 0x17, 0x4a, 0x27, 0xda, 0xa8, 0x06, - 0x92, 0x76, 0x5a, 0x80, 0x11, 0xd3, 0xa4, 0x92, 0xda, 0x40, 0x2d, 0xeb, 0x8f, 0x7c, 0xd9, 0x28, - 0xa3, 0xd8, 0xd8, 0xb9, 0xb8, 0x75, 0x71, 0xef, 0x3a, 0x78, 0x58, 0xaa, 0x05, 0x98, 0xe2, 0xdc, - 0x24, 0xa2, 0x28, 0x65, 0xd2, 0x4e, 0x13, 0xb3, 0x5a, 0x82, 0x76, 0x99, 0x83, 0xfb, 0x2e, 0x93, - 0x5b, 0x95, 0x78, 0x80, 0x15, 0x87, 0x5f, 0xe8, 0xee, 0xdb, 0x8e, 0x74, 0xf6, 0xee, 0x8d, 0x90, - 0x0d, 0x7b, 0x40, 0x77, 0x2c, 0x38, 0x9f, 0xc3, 0x2a, 0x44, 0x31, 0x9a, 0xec, 0x64, 0xc4, 0x16, - 0xce, 0x60, 0xc5, 0xf6, 
0xe9, 0x68, 0x06, 0x15, 0x18, 0x08, 0xfb, 0x31, 0x9a, 0x90, 0xcc, 0x2b, - 0x16, 0xd0, 0x41, 0x67, 0x1f, 0xc4, 0x68, 0x72, 0x33, 0xeb, 0x3e, 0xd9, 0x98, 0x0e, 0x5b, 0x51, - 0x5d, 0x40, 0x88, 0x6d, 0xcd, 0x89, 0xe7, 0x77, 0x7e, 0x7e, 0x3b, 0xda, 0x73, 0xa7, 0x1f, 0xe9, - 0xd9, 0x3c, 0x7e, 0xca, 0x9f, 0x9d, 0x1c, 0x7e, 0xef, 0xd3, 0x5b, 0xa7, 0x95, 0x2a, 0xe7, 0xaf, - 0xc1, 0x88, 0x99, 0x30, 0x82, 0xbd, 0xa2, 0x7b, 0x0d, 0xe8, 0xa5, 0xaa, 0x35, 0xe4, 0xa5, 0x5a, - 0x2c, 0xa4, 0x09, 0x47, 0x31, 0x9a, 0xec, 0x1e, 0xc7, 0x7c, 0x7b, 0x4b, 0xde, 0xdd, 0x92, 0xb7, - 0x53, 0xfe, 0xc2, 0xf6, 0x33, 0x6f, 0xcf, 0x6e, 0x6f, 0x83, 0xae, 0xce, 0x3e, 0xd0, 0xfd, 0x06, - 0x3e, 0x5d, 0x80, 0x36, 0xf9, 0xb9, 0xac, 0x45, 0x25, 0x3f, 0x43, 0x5e, 0x74, 0x87, 0x85, 0x37, - 0x2c, 0xf1, 0xf1, 0xff, 0xc4, 0x97, 0xde, 0x67, 0x67, 0xca, 0x5c, 0x38, 0x1b, 0x7b, 0xca, 0x95, - 0x26, 0xcb, 0xe9, 0xbd, 0xbf, 0x83, 0xfe, 0x83, 0x27, 0x16, 0xff, 0xe4, 0x5a, 0xbc, 0x9f, 0xfb, - 0xee, 0x96, 0x73, 0xa5, 0x9d, 0x62, 0x82, 0x82, 0x7e, 0x8a, 0x49, 0x3f, 0x18, 0xa4, 0x98, 0x0c, - 0x02, 0x9c, 0x62, 0x82, 0x83, 0x61, 0x8a, 0xc9, 0x30, 0x18, 0x9d, 0x1e, 0xff, 0x58, 0x47, 0xe8, - 0x72, 0x1d, 0xa1, 0xdf, 0xeb, 0x08, 0x7d, 0xdd, 0x44, 0xbd, 0xcb, 0x4d, 0xd4, 0xfb, 0xb5, 0x89, - 0x7a, 0xef, 0x43, 0xf7, 0x93, 0xf5, 0x6c, 0xce, 0xa5, 0xf2, 0xfb, 0x64, 0xf7, 0xa1, 0x18, 0xd9, - 0x57, 0x3f, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, 0x98, 0x9f, 0x12, 0x13, 0x6c, 0x02, 0x00, 0x00, -} - -func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StoreKVPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintListening(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x22 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintListening(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x1a - } - if m.Delete { - i-- - if m.Delete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.StoreKey) > 0 { - i -= len(m.StoreKey) - copy(dAtA[i:], m.StoreKey) - i = encodeVarintListening(dAtA, i, uint64(len(m.StoreKey))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BlockMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ResponseFinalizeBlock != nil { - { - size, err := m.ResponseFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.RequestFinalizeBlock != nil { - { - size, err := m.RequestFinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.ResponseCommit != nil { - { - size, err := 
m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintListening(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} - -func encodeVarintListening(dAtA []byte, offset int, v uint64) int { - offset -= sovListening(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StoreKVPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.StoreKey) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - if m.Delete { - n += 2 - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovListening(uint64(l)) - } - return n -} - -func (m *BlockMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ResponseCommit != nil { - l = m.ResponseCommit.Size() - n += 1 + l + sovListening(uint64(l)) - } - if m.RequestFinalizeBlock != nil { - l = m.RequestFinalizeBlock.Size() - n += 1 + l + sovListening(uint64(l)) - } - if m.ResponseFinalizeBlock != nil { - l = m.ResponseFinalizeBlock.Size() - n += 1 + l + sovListening(uint64(l)) - } - return n -} - -func sovListening(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozListening(x uint64) (n int) { - return sovListening(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *StoreKVPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StoreKVPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StoreKVPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoreKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoreKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Delete = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipListening(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthListening - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlockMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResponseCommit == nil { - m.ResponseCommit = &v1.CommitResponse{} - } - if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestFinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestFinalizeBlock == nil { - m.RequestFinalizeBlock = &v1.FinalizeBlockRequest{} - } - if err := 
m.RequestFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseFinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowListening - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthListening - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthListening - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResponseFinalizeBlock == nil { - m.ResponseFinalizeBlock = &v1.FinalizeBlockResponse{} - } - if err := m.ResponseFinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipListening(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthListening - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipListening(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowListening - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthListening - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupListening - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthListening - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthListening = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowListening = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupListening = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/types/listening_test.go b/store/types/listening_test.go deleted file mode 100644 index 034d2a4960..0000000000 --- a/store/types/listening_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewStoreKVPairWriteListener(t *testing.T) { - listener := NewMemoryListener() - require.IsType(t, &MemoryListener{}, listener) -} - -func TestOnWrite(t *testing.T) { - listener := NewMemoryListener() - - testStoreKey := NewKVStoreKey("test_key") - testKey := []byte("testing123") - testValue := []byte("testing321") - - // test set - listener.OnWrite(testStoreKey, testKey, 
testValue, false) - outputKVPair := listener.PopStateCache()[0] - expectedOutputKVPair := &StoreKVPair{ - Key: testKey, - Value: testValue, - StoreKey: testStoreKey.Name(), - Delete: false, - } - require.EqualValues(t, expectedOutputKVPair, outputKVPair) - - // test delete - listener.OnWrite(testStoreKey, testKey, testValue, true) - outputKVPair = listener.PopStateCache()[0] - expectedOutputKVPair = &StoreKVPair{ - Key: testKey, - Value: testValue, - StoreKey: testStoreKey.Name(), - Delete: true, - } - require.EqualValues(t, expectedOutputKVPair, outputKVPair) -} diff --git a/store/types/logger.go b/store/types/logger.go deleted file mode 100644 index abce36c86a..0000000000 --- a/store/types/logger.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// Logger defines basic logger that store expects. -// It is a subset of the cosmossdk.io/log.Logger interface. -type Logger interface { - // Info takes a message and a set of key/value pairs and logs with level INFO. - // The key of the tuple must be a string. - Info(msg string, keyVals ...any) - - // Warn takes a message and a set of key/value pairs and logs with level WARN. - // The key of the tuple must be a string. - Warn(msg string, keyVals ...any) - - // Error takes a message and a set of key/value pairs and logs with level ERR. - // The key of the tuple must be a string. - Error(msg string, keyVals ...any) - - // Debug takes a message and a set of key/value pairs and logs with level DEBUG. - // The key of the tuple must be a string. - Debug(msg string, keyVals ...any) -} diff --git a/store/types/proof.go b/store/types/proof.go deleted file mode 100644 index d14f78ba0d..0000000000 --- a/store/types/proof.go +++ /dev/null @@ -1,174 +0,0 @@ -package types - -import ( - "fmt" - - cmtprotocrypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - "github.com/cometbft/cometbft/crypto/merkle" - ics23 "github.com/cosmos/ics23/go" - - errorsmod "cosmossdk.io/errors" - sdkmaps "cosmossdk.io/store/internal/maps" - sdkproofs "cosmossdk.io/store/internal/proofs" -) - -const ( - ProofOpIAVLCommitment = "ics23:iavl" - ProofOpSimpleMerkleCommitment = "ics23:simple" - ProofOpSMTCommitment = "ics23:smt" -) - -// CommitmentOp implements merkle.ProofOperator by wrapping an ics23 CommitmentProof -// It also contains a Key field to determine which key the proof is proving. -// NOTE: CommitmentProof currently can either be ExistenceProof or NonexistenceProof -// -// Type and Spec are classified by the kind of merkle proof it represents allowing -// the code to be reused by more types. Spec is never on the wire, but mapped from type in the code. 
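
As a rough usage sketch of the operator type defined just below (this is not part of the original file; the proof value is assumed to be obtained from an IAVL store query elsewhere), `Run` proves existence when given one argument and absence when given none:

```go
package example

import (
	ics23 "github.com/cosmos/ics23/go"

	storetypes "cosmossdk.io/store/types"
)

// verifyWithCommitmentOp sketches the Run contract described in this file:
// one argument proves existence of the key with that value, zero arguments
// prove its absence. The proof is assumed to come from elsewhere.
func verifyWithCommitmentOp(key, value []byte, proof *ics23.CommitmentProof) error {
	op := storetypes.NewIavlCommitmentOp(key, proof)

	// Prove that key exists with the given value; Run returns the
	// commitment root on success.
	if _, err := op.Run([][]byte{value}); err != nil {
		return err
	}

	// Passing no arguments instead asks Run to prove absence of the key:
	//   _, err := op.Run(nil)
	return nil
}
```
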
-type CommitmentOp struct { - Type string - Spec *ics23.ProofSpec - Key []byte - Proof *ics23.CommitmentProof -} - -var _ merkle.ProofOperator = CommitmentOp{} - -func NewIavlCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpIAVLCommitment, - Spec: ics23.IavlSpec, - Key: key, - Proof: proof, - } -} - -func NewSimpleMerkleCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSimpleMerkleCommitment, - Spec: ics23.TendermintSpec, - Key: key, - Proof: proof, - } -} - -func NewSmtCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSMTCommitment, - Spec: ics23.SmtSpec, - Key: key, - Proof: proof, - } -} - -// CommitmentOpDecoder takes a merkle.ProofOp and attempts to decode it into a CommitmentOp ProofOperator -// The proofOp.Data is just a marshaled CommitmentProof. The Key of the CommitmentOp is extracted -// from the unmarshalled proof. -func CommitmentOpDecoder(pop cmtprotocrypto.ProofOp) (merkle.ProofOperator, error) { - var spec *ics23.ProofSpec - switch pop.Type { - case ProofOpIAVLCommitment: - spec = ics23.IavlSpec - case ProofOpSimpleMerkleCommitment: - spec = ics23.TendermintSpec - case ProofOpSMTCommitment: - spec = ics23.SmtSpec - default: - return nil, errorsmod.Wrapf(ErrInvalidProof, "unexpected ProofOp.Type; got %s, want supported ics23 subtypes 'ProofOpSimpleMerkleCommitment', 'ProofOpIAVLCommitment', or 'ProofOpSMTCommitment'", pop.Type) - } - - proof := &ics23.CommitmentProof{} - err := proof.Unmarshal(pop.Data) - if err != nil { - return nil, err - } - - op := CommitmentOp{ - Type: pop.Type, - Key: pop.Key, - Spec: spec, - Proof: proof, - } - return op, nil -} - -func (op CommitmentOp) GetKey() []byte { - return op.Key -} - -// Run takes in a list of arguments and attempts to run the proof op against these arguments -// Returns the root wrapped in [][]byte if the proof op succeeds with given args. If not, -// it will return an error. -// -// CommitmentOp will accept args of length 1 or length 0 -// If length 1 args is passed in, then CommitmentOp will attempt to prove the existence of the key -// with the value provided by args[0] using the embedded CommitmentProof and return the CommitmentRoot of the proof -// If length 0 args is passed in, then CommitmentOp will attempt to prove the absence of the key -// in the CommitmentOp and return the CommitmentRoot of the proof -func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { - // calculate root from proof - root, err := op.Proof.Calculate() - if err != nil { - return nil, errorsmod.Wrapf(ErrInvalidProof, "could not calculate root for proof: %v", err) - } - // Only support an existence proof or nonexistence proof (batch proofs currently unsupported) - switch len(args) { - case 0: - // Args are nil, so we verify the absence of the key. 
- absent := ics23.VerifyNonMembership(op.Spec, root, op.Proof, op.Key) - if !absent { - return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify absence of key: %s", string(op.Key)) - } - - case 1: - // Args is length 1, verify existence of key with value args[0] - if !ics23.VerifyMembership(op.Spec, root, op.Proof, op.Key, args[0]) { - return nil, errorsmod.Wrapf(ErrInvalidProof, "proof did not verify existence of key %s with given value %x", op.Key, args[0]) - } - default: - return nil, errorsmod.Wrapf(ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args)) - } - - return [][]byte{root}, nil -} - -// ProofOp implements ProofOperator interface and converts a CommitmentOp -// into a merkle.ProofOp format that can later be decoded by CommitmentOpDecoder -// back into a CommitmentOp for proof verification -func (op CommitmentOp) ProofOp() cmtprotocrypto.ProofOp { - bz, err := op.Proof.Marshal() - if err != nil { - panic(err.Error()) - } - return cmtprotocrypto.ProofOp{ - Type: op.Type, - Key: op.Key, - Data: bz, - } -} - -// ProofOpFromMap generates a single proof from a map and converts it to a ProofOp. -func ProofOpFromMap(cmap map[string][]byte, storeName string) (ret cmtprotocrypto.ProofOp, err error) { - _, proofs, _ := sdkmaps.ProofsFromMap(cmap) - - proof := proofs[storeName] - if proof == nil { - err = fmt.Errorf("ProofOp for %s but not registered store name", storeName) - return - } - - // convert merkle.SimpleProof to CommitmentProof - existProof, err := sdkproofs.ConvertExistenceProof(proof, []byte(storeName), cmap[storeName]) - if err != nil { - err = fmt.Errorf("could not convert simple proof to existence proof: %w", err) - return - } - - commitmentProof := &ics23.CommitmentProof{ - Proof: &ics23.CommitmentProof_Exist{ - Exist: existProof, - }, - } - - ret = NewSimpleMerkleCommitmentOp([]byte(storeName), commitmentProof).ProofOp() - return -} diff --git a/store/types/store.go b/store/types/store.go deleted file mode 100644 index 7846876eeb..0000000000 --- a/store/types/store.go +++ /dev/null @@ -1,544 +0,0 @@ -package types - -import ( - "fmt" - "io" - - crypto "github.com/cometbft/cometbft/api/cometbft/crypto/v1" - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - snapshottypes "cosmossdk.io/store/snapshots/types" -) - -type Store interface { - GetStoreType() StoreType - CacheWrapper -} - -// Committer is something that can persist to disk -type Committer interface { - Commit() CommitID - LastCommitID() CommitID - - // WorkingHash returns the hash of the KVStore's state before commit. - WorkingHash() []byte - - SetPruning(pruningtypes.PruningOptions) - GetPruning() pruningtypes.PruningOptions -} - -type PausablePruner interface { - // PausePruning let the pruning handler know that the store is being committed - // or not, so the handler can decide to prune or not the store. - // - // NOTE: PausePruning(true) should be called before Commit() and PausePruning(false) - PausePruning(bool) -} - -// CommitStore represents a store that can be committed and provides basic store operations. -// It combines the functionality of Committer and Store interfaces. -// Stores of MultiStore must implement CommitStore. -type CommitStore interface { - Committer - Store -} - -// Queryable allows a Store to expose internal state to the abci.Query -// interface. Multistore can route requests to the proper Store. 
-// -// This is an optional, but useful extension to any CommitStore -type Queryable interface { - Query(*RequestQuery) (*ResponseQuery, error) -} - -type RequestQuery struct { - Data []byte - Path string - Height int64 - Prove bool -} - -type ResponseQuery struct { - Code uint32 - Log string - Info string - Index int64 - Key []byte - Value []byte - ProofOps *crypto.ProofOps - Height int64 - Codespace string -} - -//---------------------------------------- -// MultiStore - -// StoreUpgrades defines a series of transformations to apply the multistore db upon load -type StoreUpgrades struct { - Added []string `json:"added"` - Renamed []StoreRename `json:"renamed"` - Deleted []string `json:"deleted"` -} - -// StoreRename defines a name change of a sub-store. -// All data previously under a PrefixStore with OldKey will be copied -// to a PrefixStore with NewKey, then deleted from OldKey store. -type StoreRename struct { - OldKey string `json:"old_key"` - NewKey string `json:"new_key"` -} - -// IsAdded returns true if the given key should be added -func (s *StoreUpgrades) IsAdded(key string) bool { - if s == nil { - return false - } - for _, added := range s.Added { - if key == added { - return true - } - } - return false -} - -// IsDeleted returns true if the given key should be deleted -func (s *StoreUpgrades) IsDeleted(key string) bool { - if s == nil { - return false - } - for _, d := range s.Deleted { - if d == key { - return true - } - } - return false -} - -// RenamedFrom returns the oldKey if it was renamed -// Returns "" if it was not renamed -func (s *StoreUpgrades) RenamedFrom(key string) string { - if s == nil { - return "" - } - for _, re := range s.Renamed { - if re.NewKey == key { - return re.OldKey - } - } - return "" -} - -type MultiStore interface { - Store - - // CacheMultiStore branches MultiStore into a cached storage object. - // NOTE: Caller should probably not call .Write() on each, but - // call CacheMultiStore.Write(). - CacheMultiStore() CacheMultiStore - - // CacheMultiStoreWithVersion branches the underlying MultiStore where - // each stored is loaded at a specific version (height). - CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) - - // GetStore is convenience for fetching substores. - // If the store does not exist, panics. - GetStore(StoreKey) Store - GetKVStore(StoreKey) KVStore - - // TracingEnabled returns if tracing is enabled for the MultiStore. - TracingEnabled() bool - - // SetTracer sets the tracer for the MultiStore that the underlying - // stores will utilize to trace operations. The modified MultiStore is - // returned. - SetTracer(w io.Writer) MultiStore - - // SetTracingContext sets the tracing context for a MultiStore. It is - // implied that the caller should update the context when necessary between - // tracing operations. The modified MultiStore is returned. - SetTracingContext(TraceContext) MultiStore - - // LatestVersion returns the latest version in the store - LatestVersion() int64 -} - -// CacheMultiStore is from MultiStore.CacheMultiStore().... -type CacheMultiStore interface { - MultiStore - Write() // Writes operations to underlying KVStore -} - -// CommitMultiStore is an interface for a MultiStore without cache capabilities. -type CommitMultiStore interface { - Committer - MultiStore - snapshottypes.Snapshotter - - // MountStoreWithDB mount a store of type using the given db. - // If db == nil, the new store will use the CommitMultiStore db. 
- MountStoreWithDB(key StoreKey, typ StoreType, db dbm.DB) - - // GetCommitStore panics on a nil key. - GetCommitStore(key StoreKey) CommitStore - - // GetCommitKVStore panics on a nil key. - GetCommitKVStore(key StoreKey) CommitKVStore - - // LoadLatestVersion load the latest persisted version. Called once after all calls to - // Mount*Store() are complete. - LoadLatestVersion() error - - // LoadLatestVersionAndUpgrade will load the latest version, but also - // rename/delete/create sub-store keys, before registering all the keys - // in order to handle breaking formats in migrations - LoadLatestVersionAndUpgrade(upgrades *StoreUpgrades) error - - // LoadVersionAndUpgrade will load the named version, but also - // rename/delete/create sub-store keys, before registering all the keys - // in order to handle breaking formats in migrations - LoadVersionAndUpgrade(ver int64, upgrades *StoreUpgrades) error - - // LoadVersion load a specific persisted version. When you load an old version, or when - // the last commit attempt didn't complete, the next commit after loading - // must be idempotent (return the same commit id). Otherwise the behavior is - // undefined. - LoadVersion(ver int64) error - - // SetInterBlockCache set an inter-block (persistent) cache that maintains a mapping from - // StoreKeys to CommitKVStores. - SetInterBlockCache(MultiStorePersistentCache) - - // SetInitialVersion sets the initial version of the IAVL tree. It is used when - // starting a new chain at an arbitrary height. - SetInitialVersion(version int64) error - - // SetIAVLCacheSize sets the cache size of the IAVL tree. - SetIAVLCacheSize(size int) - - // SetIAVLDisableFastNode enables/disables fastnode feature on iavl. - SetIAVLDisableFastNode(disable bool) - - // RollbackToVersion rollback the db to specific version(height). - RollbackToVersion(version int64) error - - // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey - ListeningEnabled(key StoreKey) bool - - // AddListeners adds a listener for the KVStore belonging to the provided StoreKey - AddListeners(keys []StoreKey) - - // PopStateCache returns the accumulated state change messages from the CommitMultiStore - PopStateCache() []*StoreKVPair - - // SetMetrics sets the metrics for the KVStore - SetMetrics(metrics metrics.StoreMetrics) -} - -//---------subsp------------------------------- -// KVStore - -// BasicKVStore is a simple interface to get/set data -type BasicKVStore interface { - // Get returns nil if key doesn't exist. Panics on nil key. - Get(key []byte) []byte - - // Has checks if a key exists. Panics on nil key. - Has(key []byte) bool - - // Set sets the key. Panics on nil key or value. - Set(key, value []byte) - - // Delete deletes the key. Panics on nil key. - Delete(key []byte) -} - -// KVStore additionally provides iteration and deletion -type KVStore interface { - Store - BasicKVStore - - // Iterator over a domain of keys in ascending order. End is exclusive. - // Start must be less than end, or the Iterator is invalid. - // Iterator must be closed by caller. - // To iterate over entire domain, use store.Iterator(nil, nil) - // CONTRACT: No writes may happen within a domain while an iterator exists over it. - // Exceptionally allowed for cachekv.Store, safe to write in the modules. - Iterator(start, end []byte) Iterator - - // ReverseIterator iterates over a domain of keys in descending order. End is exclusive. - // Start must be less than end, or the Iterator is invalid. 
- // Iterator must be closed by caller. - // CONTRACT: No writes may happen within a domain while an iterator exists over it. - // Exceptionally allowed for cachekv.Store, safe to write in the modules. - ReverseIterator(start, end []byte) Iterator -} - -// Iterator is an alias db's Iterator for convenience. -type Iterator = dbm.Iterator - -// CacheKVStore branches a KVStore and provides read cache functionality. -// After calling .Write() on the CacheKVStore, all previously created -// CacheKVStores on the object expire. -type CacheKVStore interface { - KVStore - - // Write writes operations to underlying KVStore - Write() -} - -// CommitKVStore is an interface for MultiStore. -type CommitKVStore interface { - Committer - KVStore -} - -//---------------------------------------- -// CacheWrap - -// CacheWrap is the most appropriate interface for store ephemeral branching and cache. -// For example, IAVLStore.CacheWrap() returns a CacheKVStore. CacheWrap should not return -// a Committer, since Commit ephemeral store make no sense. It can return KVStore, -// HeapStore, SpaceStore, etc. -type CacheWrap interface { - // Write syncs with the underlying store. - Write() - - // CacheWrap recursively wraps again. - CacheWrap() CacheWrap - - // CacheWrapWithTrace recursively wraps again with tracing enabled. - CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap -} - -type CacheWrapper interface { - // CacheWrap branches a store. - CacheWrap() CacheWrap - - // CacheWrapWithTrace branches a store with tracing enabled. - CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap -} - -func (cid CommitID) IsZero() bool { - return cid.Version == 0 && len(cid.Hash) == 0 -} - -func (cid CommitID) String() string { - return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) -} - -//---------------------------------------- -// Store types - -// StoreType is kind of store -type StoreType int - -const ( - StoreTypeMulti StoreType = iota - StoreTypeDB - StoreTypeIAVL - StoreTypeTransient - StoreTypeMemory - StoreTypeSMT - StoreTypePersistent -) - -func (st StoreType) String() string { - switch st { - case StoreTypeMulti: - return "StoreTypeMulti" - - case StoreTypeDB: - return "StoreTypeDB" - - case StoreTypeIAVL: - return "StoreTypeIAVL" - - case StoreTypeTransient: - return "StoreTypeTransient" - - case StoreTypeMemory: - return "StoreTypeMemory" - - case StoreTypeSMT: - return "StoreTypeSMT" - - case StoreTypePersistent: - return "StoreTypePersistent" - } - - return "unknown store type" -} - -//---------------------------------------- -// Keys for accessing substores - -// StoreKey is a key used to index stores in a MultiStore. -type StoreKey interface { - Name() string - String() string -} - -// CapabilityKey represent the Cosmos SDK keys for object-capability -// generation in the IBC protocol as defined in https://github.com/cosmos/ibc/tree/master/spec/core/ics-005-port-allocation#data-structures -type CapabilityKey StoreKey - -// KVStoreKey is used for accessing substores. -// Only the pointer value should ever be used - it functions as a capabilities key. -type KVStoreKey struct { - name string -} - -// NewKVStoreKey returns a new pointer to a KVStoreKey. -// Use a pointer so keys don't collide. -func NewKVStoreKey(name string) *KVStoreKey { - if name == "" { - panic("empty key name not allowed") - } - return &KVStoreKey{ - name: name, - } -} - -// NewKVStoreKeys returns a map of new pointers to KVStoreKey's. 
-// The function will panic if there is a potential conflict in names (see `assertNoPrefix` -// function for more details). -func NewKVStoreKeys(names ...string) map[string]*KVStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*KVStoreKey, len(names)) - for _, n := range names { - keys[n] = NewKVStoreKey(n) - } - - return keys -} - -func (key *KVStoreKey) Name() string { - return key.name -} - -func (key *KVStoreKey) String() string { - return fmt.Sprintf("KVStoreKey{%p, %s}", key, key.name) -} - -// TransientStoreKey is used for indexing transient stores in a MultiStore -type TransientStoreKey struct { - name string -} - -// NewTransientStoreKey constructs new TransientStoreKey -// Must return a pointer according to the ocap principle -func NewTransientStoreKey(name string) *TransientStoreKey { - return &TransientStoreKey{ - name: name, - } -} - -// Name implements StoreKey -func (key *TransientStoreKey) Name() string { - return key.name -} - -// String implements StoreKey -func (key *TransientStoreKey) String() string { - return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name) -} - -// MemoryStoreKey defines a typed key to be used with an in-memory KVStore. -type MemoryStoreKey struct { - name string -} - -func NewMemoryStoreKey(name string) *MemoryStoreKey { - return &MemoryStoreKey{name: name} -} - -// Name returns the name of the MemoryStoreKey. -func (key *MemoryStoreKey) Name() string { - return key.name -} - -// String returns a stringified representation of the MemoryStoreKey. -func (key *MemoryStoreKey) String() string { - return fmt.Sprintf("MemoryStoreKey{%p, %s}", key, key.name) -} - -//---------------------------------------- - -// TraceContext contains TraceKVStore context data. It will be written with -// every trace operation. -type TraceContext map[string]interface{} - -// Clone clones tc into another instance of TraceContext. -func (tc TraceContext) Clone() TraceContext { - ret := TraceContext{} - for k, v := range tc { - ret[k] = v - } - - return ret -} - -// Merge merges value of newTc into tc. -func (tc TraceContext) Merge(newTc TraceContext) TraceContext { - if tc == nil { - tc = TraceContext{} - } - - for k, v := range newTc { - tc[k] = v - } - - return tc -} - -// MultiStorePersistentCache defines an interface which provides inter-block -// (persistent) caching capabilities for multiple CommitKVStores based on StoreKeys. -type MultiStorePersistentCache interface { - // GetStoreCache wrap and return the provided CommitKVStore with an inter-block (persistent) - // cache. - GetStoreCache(key StoreKey, store CommitKVStore) CommitKVStore - - // Unwrap return the underlying CommitKVStore for a StoreKey. - Unwrap(key StoreKey) CommitKVStore - - // Reset the entire set of internal caches. - Reset() -} - -// StoreWithInitialVersion is a store that can have an arbitrary initial -// version. -type StoreWithInitialVersion interface { - // SetInitialVersion sets the initial version of the IAVL tree. It is used when - // starting a new chain at an arbitrary height. - SetInitialVersion(version int64) -} - -// NewTransientStoreKeys constructs a new map of TransientStoreKey's -// Must return pointers according to the ocap principle -// The function will panic if there is a potential conflict in names -// see `assertNoCommonPrefix` function for more details. 
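
The collision check referenced here is `assertNoCommonPrefix` (defined in `utils.go` further down): it rejects any key name that is a prefix of another. A small illustrative sketch, with invented module names:

```go
package main

import (
	storetypes "cosmossdk.io/store/types"
)

func main() {
	// Fine: neither name is a prefix of the other.
	_ = storetypes.NewKVStoreKeys("bank", "staking")

	// Panics: "bank" is a prefix of "bankv2", which assertNoCommonPrefix
	// treats as a potential key collision between KVStores.
	_ = storetypes.NewKVStoreKeys("bank", "bankv2")
}
```
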
-func NewTransientStoreKeys(names ...string) map[string]*TransientStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*TransientStoreKey) - for _, n := range names { - keys[n] = NewTransientStoreKey(n) - } - - return keys -} - -// NewMemoryStoreKeys constructs a new map matching store key names to their -// respective MemoryStoreKey references. -// The function will panic if there is a potential conflict in names (see `assertNoPrefix` -// function for more details). -func NewMemoryStoreKeys(names ...string) map[string]*MemoryStoreKey { - assertNoCommonPrefix(names) - keys := make(map[string]*MemoryStoreKey) - for _, n := range names { - keys[n] = NewMemoryStoreKey(n) - } - - return keys -} diff --git a/store/types/store_test.go b/store/types/store_test.go deleted file mode 100644 index b6304d131b..0000000000 --- a/store/types/store_test.go +++ /dev/null @@ -1,240 +0,0 @@ -package types - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "gotest.tools/v3/assert" -) - -func TestStoreUpgrades(t *testing.T) { - t.Parallel() - type toAdd struct { - key string - } - type toDelete struct { - key string - delete bool - } - type toRename struct { - newkey string - result string - } - - cases := map[string]struct { - upgrades *StoreUpgrades - expectAdd []toAdd - expectDelete []toDelete - expectRename []toRename - }{ - "empty upgrade": { - expectDelete: []toDelete{{"foo", false}}, - expectRename: []toRename{{"foo", ""}}, - }, - "simple matches": { - upgrades: &StoreUpgrades{ - Deleted: []string{"foo"}, - Renamed: []StoreRename{{"bar", "baz"}}, - }, - expectDelete: []toDelete{{"foo", true}, {"bar", false}, {"baz", false}}, - expectRename: []toRename{{"foo", ""}, {"bar", ""}, {"baz", "bar"}}, - }, - "many data points": { - upgrades: &StoreUpgrades{ - Added: []string{"foo", "bar", "baz"}, - Deleted: []string{"one", "two", "three", "four", "five"}, - Renamed: []StoreRename{{"old", "new"}, {"white", "blue"}, {"black", "orange"}, {"fun", "boring"}}, - }, - expectAdd: []toAdd{{"foo"}, {"bar"}, {"baz"}}, - expectDelete: []toDelete{{"four", true}, {"six", false}, {"baz", false}}, - expectRename: []toRename{{"white", ""}, {"blue", "white"}, {"boring", "fun"}, {"missing", ""}}, - }, - } - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - for _, r := range tc.expectAdd { - assert.Equal(t, tc.upgrades.IsAdded(r.key), true) - } - for _, d := range tc.expectDelete { - assert.Equal(t, tc.upgrades.IsDeleted(d.key), d.delete) - } - for _, r := range tc.expectRename { - assert.Equal(t, tc.upgrades.RenamedFrom(r.newkey), r.result) - } - }) - } -} - -func TestCommitID(t *testing.T) { - t.Parallel() - require.True(t, CommitID{}.IsZero()) - require.False(t, CommitID{Version: int64(1)}.IsZero()) - require.False(t, CommitID{Hash: []byte("x")}.IsZero()) - require.Equal(t, "CommitID{[120 120 120 120]:64}", CommitID{Version: int64(100), Hash: []byte("xxxx")}.String()) -} - -func TestKVStoreKey(t *testing.T) { - t.Parallel() - key := NewKVStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, fmt.Sprintf("KVStoreKey{%p, test}", key), key.String()) -} - -func TestNilKVStoreKey(t *testing.T) { - t.Parallel() - - require.Panics(t, func() { - _ = NewKVStoreKey("") - }, "setting an empty key should panic") -} - -func TestTransientStoreKey(t *testing.T) { - t.Parallel() - key := NewTransientStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, 
fmt.Sprintf("TransientStoreKey{%p, test}", key), key.String()) -} - -func TestMemoryStoreKey(t *testing.T) { - t.Parallel() - key := NewMemoryStoreKey("test") - require.Equal(t, "test", key.name) - require.Equal(t, key.name, key.Name()) - require.Equal(t, fmt.Sprintf("MemoryStoreKey{%p, test}", key), key.String()) -} - -func TestTraceContext_Clone(t *testing.T) { - tests := []struct { - name string - tc TraceContext - want TraceContext - }{ - { - "nil TraceContext yields empty TraceContext", - nil, - TraceContext{}, - }, - { - "non-nil TraceContext yields equal TraceContext", - TraceContext{ - "value": 42, - }, - TraceContext{ - "value": 42, - }, - }, - { - "non-nil TraceContext yields equal TraceContext, for more than one key", - TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - }, - TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.want, tt.tc.Clone()) - }) - } -} - -func TestTraceContext_Clone_is_deep(t *testing.T) { - original := TraceContext{ - "value": 42, - "another": 24, - "weird": "string", - } - - clone := original.Clone() - - clone["other"] = true - - require.NotEqual(t, original, clone) -} - -func TestTraceContext_Merge(t *testing.T) { - tests := []struct { - name string - tc TraceContext - other TraceContext - want TraceContext - }{ - { - "tc is nil, other is empty, yields an empty TraceContext", - nil, - TraceContext{}, - TraceContext{}, - }, - { - "tc is nil, other is nil, yields an empty TraceContext", - nil, - nil, - TraceContext{}, - }, - { - "tc is not nil, other is nil, yields tc", - TraceContext{ - "data": 42, - }, - nil, - TraceContext{ - "data": 42, - }, - }, - { - "tc is not nil, other is not nil, yields tc + other", - TraceContext{ - "data": 42, - }, - TraceContext{ - "data2": 42, - }, - TraceContext{ - "data": 42, - "data2": 42, - }, - }, - { - "tc is not nil, other is not nil, other updates value in tc, yields tc updated with value from other", - TraceContext{ - "data": 42, - }, - TraceContext{ - "data": 24, - }, - TraceContext{ - "data": 24, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.want, tt.tc.Merge(tt.other)) - }) - } -} - -func TestNewTransientStoreKeys(t *testing.T) { - assert.DeepEqual(t, map[string]*TransientStoreKey{}, NewTransientStoreKeys()) - assert.DeepEqual(t, 1, len(NewTransientStoreKeys("one"))) -} - -func TestNewInfiniteGasMeter(t *testing.T) { - gm := NewInfiniteGasMeter() - require.NotNil(t, gm) -} - -func TestStoreTypes(t *testing.T) { - assert.DeepEqual(t, InclusiveEndBytes([]byte("endbytes")), InclusiveEndBytes([]byte("endbytes"))) -} diff --git a/store/types/streaming.go b/store/types/streaming.go deleted file mode 100644 index 7385518a52..0000000000 --- a/store/types/streaming.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - "context" - - abci "github.com/cometbft/cometbft/api/cometbft/abci/v1" -) - -// ABCIListener is the interface that we're exposing as a streaming service. -// It hooks into the ABCI message processing of the BaseApp. -// The error results are propagated to consensus state machine, -// if you don't want to affect consensus, handle the errors internally and always return `nil` in these APIs. 
-type ABCIListener interface { - // ListenFinalizeBlock updates the streaming service with the latest FinalizeBlock messages - ListenFinalizeBlock(ctx context.Context, req abci.FinalizeBlockRequest, res abci.FinalizeBlockResponse) error - // ListenCommit updates the steaming service with the latest Commit messages and state changes - ListenCommit(ctx context.Context, res abci.CommitResponse, changeSet []*StoreKVPair) error -} - -// StreamingManager is the struct that maintains a list of ABCIListeners and configuration settings. -type StreamingManager struct { - // ABCIListeners for hooking into the ABCI message processing of the BaseApp - // and exposing the requests and responses to external consumers - ABCIListeners []ABCIListener - - // StopNodeOnErr halts the node when ABCI streaming service listening results in an error. - StopNodeOnErr bool -} diff --git a/store/types/utils.go b/store/types/utils.go deleted file mode 100644 index a54d2746f7..0000000000 --- a/store/types/utils.go +++ /dev/null @@ -1,94 +0,0 @@ -package types - -import ( - "encoding/binary" - "fmt" - "sort" - "strings" -) - -// KVStorePrefixIterator iterates over all the keys with a certain prefix in ascending order -func KVStorePrefixIterator(kvs KVStore, prefix []byte) Iterator { - return kvs.Iterator(prefix, PrefixEndBytes(prefix)) -} - -// KVStoreReversePrefixIterator iterates over all the keys with a certain prefix in descending order. -func KVStoreReversePrefixIterator(kvs KVStore, prefix []byte) Iterator { - return kvs.ReverseIterator(prefix, PrefixEndBytes(prefix)) -} - -// PrefixEndBytes returns the []byte that would end a -// range query for all []byte with a certain prefix -// Deals with last byte of prefix being FF without overflowing -func PrefixEndBytes(prefix []byte) []byte { - if len(prefix) == 0 { - return nil - } - - end := make([]byte, len(prefix)) - copy(end, prefix) - - for { - if end[len(end)-1] != byte(255) { - end[len(end)-1]++ - break - } - - end = end[:len(end)-1] - - if len(end) == 0 { - end = nil - break - } - } - - return end -} - -// InclusiveEndBytes returns the []byte that would end a -// range query such that the input would be included -func InclusiveEndBytes(inclusiveBytes []byte) []byte { - return append(inclusiveBytes, byte(0x00)) -} - -// assertNoCommonPrefix will panic if there are two keys: k1 and k2 in keys, such that -// k1 is a prefix of k2 -func assertNoCommonPrefix(keys []string) { - sorted := make([]string, len(keys)) - copy(sorted, keys) - sort.Strings(sorted) - for i := 1; i < len(sorted); i++ { - if strings.HasPrefix(sorted[i], sorted[i-1]) { - panic(fmt.Sprint("Potential key collision between KVStores:", sorted[i], " - ", sorted[i-1])) - } - } -} - -// Uint64ToBigEndian - marshals uint64 to a bigendian byte slice so it can be sorted -func Uint64ToBigEndian(i uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, i) - return b -} - -// BigEndianToUint64 returns an uint64 from big endian encoded bytes. If encoding -// is empty, zero is returned. -func BigEndianToUint64(bz []byte) uint64 { - if len(bz) == 0 { - return 0 - } - - return binary.BigEndian.Uint64(bz) -} - -// SliceContains implements a generic function for checking if a slice contains -// a certain value. 
-func SliceContains[T comparable](elements []T, v T) bool { - for _, s := range elements { - if v == s { - return true - } - } - - return false -} diff --git a/store/types/utils_test.go b/store/types/utils_test.go deleted file mode 100644 index d05d9df6d6..0000000000 --- a/store/types/utils_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package types_test - -import ( - "testing" - - "gotest.tools/v3/assert" - - "cosmossdk.io/store/types" -) - -func TestPrefixEndBytes(t *testing.T) { - t.Parallel() - testCases := []struct { - prefix []byte - expected []byte - }{ - {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, - {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, - {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, - {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, - {[]byte{byte(255), byte(255), byte(255)}, nil}, - {[]byte{byte(255)}, nil}, - {nil, nil}, - } - - for _, test := range testCases { - end := types.PrefixEndBytes(test.prefix) - assert.DeepEqual(t, test.expected, end) - } -} - -func TestInclusiveEndBytes(t *testing.T) { - t.Parallel() - assert.DeepEqual(t, []byte{0x00}, types.InclusiveEndBytes(nil)) - bs := []byte("test") - assert.DeepEqual(t, append(bs, byte(0x00)), types.InclusiveEndBytes(bs)) -} diff --git a/store/types/validity.go b/store/types/validity.go deleted file mode 100644 index 73b15bdacc..0000000000 --- a/store/types/validity.go +++ /dev/null @@ -1,31 +0,0 @@ -package types - -var ( - // MaxKeyLength is the maximum allowed length for a key in bytes. - // It is set to 128K - 1 (131,071 bytes). - MaxKeyLength = (1 << 17) - 1 - - // MaxValueLength is the maximum allowed length for a value in bytes. - // It is set to 2G - 1 (2,147,483,647 bytes). - MaxValueLength = (1 << 31) - 1 -) - -// AssertValidKey checks if the key is valid(key is not nil, not empty and within length limit) -func AssertValidKey(key []byte) { - if len(key) == 0 { - panic("key is nil or empty") - } - if len(key) > MaxKeyLength { - panic("key is too large") - } -} - -// AssertValidValue checks if the value is valid(value is not nil and within length limit) -func AssertValidValue(value []byte) { - if value == nil { - panic("value is nil") - } - if len(value) > MaxValueLength { - panic("value is too large") - } -} diff --git a/store/types/validity_test.go b/store/types/validity_test.go deleted file mode 100644 index 56e6791364..0000000000 --- a/store/types/validity_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestAssertValidKey(t *testing.T) { - t.Parallel() - require.NotPanics(t, func() { types.AssertValidKey([]byte{0x01}) }) - require.Panics(t, func() { types.AssertValidKey([]byte{}) }) - require.Panics(t, func() { types.AssertValidKey(nil) }) -} - -func TestAssertValidValue(t *testing.T) { - t.Parallel() - require.NotPanics(t, func() { types.AssertValidValue([]byte{}) }) - require.NotPanics(t, func() { types.AssertValidValue([]byte{0x01}) }) - require.Panics(t, func() { types.AssertValidValue(nil) }) -} diff --git a/store/v2/CHANGELOG.md b/store/v2/CHANGELOG.md deleted file mode 100644 index 00e9ec02ae..0000000000 --- a/store/v2/CHANGELOG.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# Changelog - -## [Unreleased] - -### Features - -* [#17294](https://github.com/cosmos/cosmos-sdk/pull/17294) Add snapshot manager Close method. 
-
-### Improvements
-
-* [#17158](https://github.com/cosmos/cosmos-sdk/pull/17158) Start the goroutine only when a snapshot needs to be created.
-
-### Bug fixes
-
-* [#18651](https://github.com/cosmos/cosmos-sdk/pull/18651) Propagate iavl.MutableTree.Remove errors to the caller first, instead of returning a synthesized error.
diff --git a/store/v2/README.md b/store/v2/README.md
deleted file mode 100644
index 70f8dfb9a6..0000000000
--- a/store/v2/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Store
-
-The `store` package contains the implementation of store/v2, which is the SDK's
-abstraction around managing historical and committed state. See [ADR-065](../docs/architecture/adr-065-store-v2.md)
-and [Store v2 Design](https://docs.google.com/document/d/1l6uXIjTPHOOWM5N4sUUmUfCZvePoa5SNfIEtmgvgQSU/edit#heading=h.nz8dqy6wa4g1) for a high-level overview of the design and rationale.
-
-## Migration
-
-
-
-## Pruning
-
-The `root.Store` is NOT responsible for pruning. Rather, pruning is the responsibility
-of the underlying SS and SC layers. This means pruning can be implementation specific,
-such as being synchronous or asynchronous.
-
-## Usage
-
-The `store` package contains a `root.Store` type which is intended to act as an
-abstraction layer around its two primary constituent components: state storage (SS)
-and state commitment (SC). It acts as the main entry point into storage for an
-application to use in server/v2. Through `root.Store`, an application can query
-and iterate over both current and historical data, commit new state, perform state
-sync, and fetch commitment proofs.
-
-A `root.Store` is intended to be initialized with already constructed SS and SC
-backends (see the relevant package documentation for instantiation details). Note,
-from the perspective of `root.Store`, there is no notion of multi or single tree/store;
-rather, these are implementation details of SS and SC. For SS, we utilize store keys
-to namespace raw key/value pairs. For SC, we utilize an abstraction, `commitment.CommitStore`,
-to map store keys to commitment trees.
diff --git a/store/v2/batch.go b/store/v2/batch.go
deleted file mode 100644
index b3fd718520..0000000000
--- a/store/v2/batch.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package store
-
-// Batch is a write-only database that commits changes to the underlying database
-// when Write is called. A batch cannot be used concurrently.
-type Batch interface {
-	// Set inserts the given value into the key-value data store.
-	//
-	// Note: the arguments are safe to modify and read after calling Set.
-	Set(storeKey, key, value []byte) error
-
-	// Delete removes the key from the backing key-value data store.
-	//
-	// Note: the key is safe to modify and read after calling Delete.
-	Delete(storeKey, key []byte) error
-
-	// Size retrieves the amount of data queued up for writing; this includes
-	// the keys, values, and deleted keys.
-	Size() int
-
-	// Write flushes any accumulated data to disk.
-	Write() error
-
-	// Reset resets the batch.
-	Reset() error
-}
diff --git a/store/v2/commitment/README.md b/store/v2/commitment/README.md
deleted file mode 100644
index c9bcf111b4..0000000000
--- a/store/v2/commitment/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# State Commitment (SC)
-
-The `commitment` package contains the state commitment (SC) implementation.
-Specifically, it contains an IAVL v1 implementation of SC and the necessary types
-and abstractions to support other SC backends, as well as supporting general integration
-into store/v2, specifically the `RootStore` type.
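
Concretely, that wiring mirrors the package's own test setup (the store keys below are invented, and this is a sketch rather than a documented API): one IAVL `Tree` per module store key, each namespaced by a prefixed DB, handed to `NewCommitStore`.

```go
package main

import (
	coretesting "cosmossdk.io/core/testing"

	"cosmossdk.io/store/v2/commitment"
	"cosmossdk.io/store/v2/commitment/iavl"
	dbm "cosmossdk.io/store/v2/db"
)

func main() {
	db := dbm.NewMemDB()
	logger := coretesting.NewNopLogger()
	cfg := iavl.DefaultConfig()

	// One commitment tree per module store key, each namespaced
	// under its own prefix of the shared backing DB.
	multiTrees := make(map[string]commitment.Tree)
	for _, storeKey := range []string{"bank", "staking"} {
		prefixDB := dbm.NewPrefixDB(db, []byte(storeKey))
		multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, logger, cfg)
	}

	// The resulting CommitStore fulfills the SC role behind a RootStore.
	cs, err := commitment.NewCommitStore(multiTrees, db, logger)
	if err != nil {
		panic(err)
	}
	_ = cs
}
```
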
-
-A foremost design goal is that SC backends should be easily swappable, i.e. not
-necessarily IAVL. To this end, the scope of SC has been reduced; it must only:
-
-- Provide a stateful root app hash for height h resulting from applying a batch
-  of key-value set/deletes to height h-1.
-- Fulfill (though not necessarily provide) historical proofs for all heights < `h`.
-- Provide an API for snapshot create/restore to fulfill state sync requests.
-
-Notably, SC is not required to provide key iteration or value retrieval for either
-queries or state machine execution, this now being the responsibility of state
-storage.
-
-An SC implementation may choose not to provide historical proofs past height `h - n`
-(`n` can be 0) due to time and space constraints, but since store/v2 defines
-an API for historical proofs there should be at least one configuration of a
-given SC backend which supports this.
-
-## Benchmarks
-
-See this [section](https://docs.google.com/document/d/1l6uXIjTPHOOWM5N4sUUmUfCZvePoa5SNfIEtmgvgQSU/edit#heading=h.7l0i621y5vgm) for specifics on SC benchmarks on various implementations.
-
-## Pruning
-
-
-
-## State Sync
-
-State commitment (SC) does not have a direct notion of state sync. Rather,
-`snapshots.Manager` is responsible for creating and restoring snapshots of the
-entire state. The `snapshots.Manager` has a `CommitSnapshotter` field which is
-fulfilled by the `CommitStore` type; specifically, it implements the `Snapshot`
-and `Restore` methods.
-
-## Usage
-
-Similar to the `storage` package, the `commitment` package is designed to be used
-in a broader store implementation, i.e. it fulfills the role of the SC backend.
-Specifically, it provides a `CommitStore` type which accepts a `corestore.KVStore`
-and a mapping from store keys (strings, each representing a single module) to
-`Tree`s, which reflect the commitment structure.
diff --git a/store/v2/commitment/iavl/config.go b/store/v2/commitment/iavl/config.go
deleted file mode 100644
index 027f8f2788..0000000000
--- a/store/v2/commitment/iavl/config.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package iavl
-
-// Config is the configuration for the IAVL tree.
-type Config struct {
-	CacheSize              int  `mapstructure:"cache-size" toml:"cache-size" comment:"CacheSize sets the size of the iavl tree cache."`
-	SkipFastStorageUpgrade bool `mapstructure:"skip-fast-storage-upgrade" toml:"skip-fast-storage-upgrade" comment:"If true, the tree will behave as if fast storage were disabled and will never upgrade to fast storage."`
-}
-
-// DefaultConfig returns the default configuration for the IAVL tree.
-func DefaultConfig() *Config {
-	return &Config{
-		CacheSize:              1000,
-		SkipFastStorageUpgrade: false,
-	}
-}
diff --git a/store/v2/commitment/iavl/exporter.go b/store/v2/commitment/iavl/exporter.go
deleted file mode 100644
index 20f00d1a17..0000000000
--- a/store/v2/commitment/iavl/exporter.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package iavl
-
-import (
-	"errors"
-
-	"github.com/cosmos/iavl"
-
-	"cosmossdk.io/store/v2/commitment"
-	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
-)
-
-// Exporter is a wrapper around iavl.Exporter.
-type Exporter struct {
-	exporter *iavl.Exporter
-}
-
-// Next returns the next item in the exporter.
-func (e *Exporter) Next() (*snapshotstypes.SnapshotIAVLItem, error) { - item, err := e.exporter.Next() - if err != nil { - if errors.Is(err, iavl.ErrorExportDone) { - return nil, commitment.ErrorExportDone - } - return nil, err - } - - return &snapshotstypes.SnapshotIAVLItem{ - Key: item.Key, - Value: item.Value, - Version: item.Version, - Height: int32(item.Height), - }, nil -} - -// Close closes the exporter. -func (e *Exporter) Close() error { - e.exporter.Close() - - return nil -} diff --git a/store/v2/commitment/iavl/importer.go b/store/v2/commitment/iavl/importer.go deleted file mode 100644 index 6f1b0eedf2..0000000000 --- a/store/v2/commitment/iavl/importer.go +++ /dev/null @@ -1,34 +0,0 @@ -package iavl - -import ( - "github.com/cosmos/iavl" - - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -// Importer is a wrapper around iavl.Importer. -type Importer struct { - importer *iavl.Importer -} - -// Add adds the given item to the importer. -func (i *Importer) Add(item *snapshotstypes.SnapshotIAVLItem) error { - return i.importer.Add(&iavl.ExportNode{ - Key: item.Key, - Value: item.Value, - Version: item.Version, - Height: int8(item.Height), - }) -} - -// Commit commits the importer. -func (i *Importer) Commit() error { - return i.importer.Commit() -} - -// Close closes the importer. -func (i *Importer) Close() error { - i.importer.Close() - - return nil -} diff --git a/store/v2/commitment/iavl/tree.go b/store/v2/commitment/iavl/tree.go deleted file mode 100644 index 5de6a4c868..0000000000 --- a/store/v2/commitment/iavl/tree.go +++ /dev/null @@ -1,145 +0,0 @@ -package iavl - -import ( - "fmt" - - "github.com/cosmos/iavl" - ics23 "github.com/cosmos/ics23/go" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - dbm "cosmossdk.io/store/v2/db" -) - -var ( - _ commitment.Tree = (*IavlTree)(nil) - _ store.PausablePruner = (*IavlTree)(nil) -) - -// IavlTree is a wrapper around iavl.MutableTree. -type IavlTree struct { - tree *iavl.MutableTree -} - -// NewIavlTree creates a new IavlTree instance. -func NewIavlTree(db corestore.KVStoreWithBatch, logger log.Logger, cfg *Config) *IavlTree { - tree := iavl.NewMutableTree(dbm.NewWrapper(db), cfg.CacheSize, cfg.SkipFastStorageUpgrade, logger, iavl.AsyncPruningOption(true)) - return &IavlTree{ - tree: tree, - } -} - -// Remove removes the given key from the tree. -func (t *IavlTree) Remove(key []byte) error { - _, _, err := t.tree.Remove(key) - if err != nil { - return err - } - return nil -} - -// Set sets the given key-value pair in the tree. -func (t *IavlTree) Set(key, value []byte) error { - _, err := t.tree.Set(key, value) - return err -} - -// Hash returns the hash of the latest saved version of the tree. -func (t *IavlTree) Hash() []byte { - return t.tree.Hash() -} - -// WorkingHash returns the working hash of the tree. -func (t *IavlTree) WorkingHash() []byte { - return t.tree.WorkingHash() -} - -// LoadVersion loads the state at the given version. -func (t *IavlTree) LoadVersion(version uint64) error { - return t.tree.LoadVersionForOverwriting(int64(version)) -} - -// Commit commits the current state to the tree. -func (t *IavlTree) Commit() ([]byte, uint64, error) { - hash, v, err := t.tree.SaveVersion() - return hash, uint64(v), err -} - -// GetProof returns a proof for the given key and version. 
-func (t *IavlTree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - immutableTree, err := t.tree.GetImmutable(int64(version)) - if err != nil { - return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) - } - - return immutableTree.GetProof(key) -} - -func (t *IavlTree) Get(version uint64, key []byte) ([]byte, error) { - immutableTree, err := t.tree.GetImmutable(int64(version)) - if err != nil { - return nil, fmt.Errorf("failed to get immutable tree at version %d: %w", version, err) - } - - return immutableTree.Get(key) -} - -// GetLatestVersion returns the latest version of the tree. -func (t *IavlTree) GetLatestVersion() uint64 { - return uint64(t.tree.Version()) -} - -// SetInitialVersion sets the initial version of the database. -func (t *IavlTree) SetInitialVersion(version uint64) error { - t.tree.SetInitialVersion(version) - return nil -} - -// Prune prunes all versions up to and including the provided version. -func (t *IavlTree) Prune(version uint64) error { - return t.tree.DeleteVersionsTo(int64(version)) -} - -// PausePruning pauses the pruning process. -func (t *IavlTree) PausePruning(pause bool) { - if pause { - t.tree.SetCommitting() - } else { - t.tree.UnsetCommitting() - } -} - -// Export exports the tree exporter at the given version. -func (t *IavlTree) Export(version uint64) (commitment.Exporter, error) { - tree, err := t.tree.GetImmutable(int64(version)) - if err != nil { - return nil, err - } - exporter, err := tree.Export() - if err != nil { - return nil, err - } - - return &Exporter{ - exporter: exporter, - }, nil -} - -// Import imports the tree importer at the given version. -func (t *IavlTree) Import(version uint64) (commitment.Importer, error) { - importer, err := t.tree.Import(int64(version)) - if err != nil { - return nil, err - } - - return &Importer{ - importer: importer, - }, nil -} - -// Close closes the iavl tree. 
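The `Export`/`Import` pair above is the primitive that snapshotting builds on. A minimal round-trip sketch, assuming a source tree with at least one committed version: drain the exporter until `commitment.ErrorExportDone`, feed each item into an importer on a fresh tree, then commit the import:

```go
package main

import (
	"errors"

	coretesting "cosmossdk.io/core/testing"
	"cosmossdk.io/store/v2/commitment"
	"cosmossdk.io/store/v2/commitment/iavl"
	dbm "cosmossdk.io/store/v2/db"
)

func main() {
	logger := coretesting.NewNopLogger()

	src := iavl.NewIavlTree(dbm.NewMemDB(), logger, iavl.DefaultConfig())
	if err := src.Set([]byte("k"), []byte("v")); err != nil {
		panic(err)
	}
	_, version, err := src.Commit()
	if err != nil {
		panic(err)
	}

	exporter, err := src.Export(version)
	if err != nil {
		panic(err)
	}
	defer exporter.Close()

	dst := iavl.NewIavlTree(dbm.NewMemDB(), logger, iavl.DefaultConfig())
	importer, err := dst.Import(version)
	if err != nil {
		panic(err)
	}
	defer importer.Close()

	for {
		item, err := exporter.Next()
		if errors.Is(err, commitment.ErrorExportDone) {
			break
		} else if err != nil {
			panic(err)
		}
		if err := importer.Add(item); err != nil {
			panic(err)
		}
	}
	if err := importer.Commit(); err != nil {
		panic(err)
	}
}
```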
-func (t *IavlTree) Close() error { - return t.tree.Close() -} diff --git a/store/v2/commitment/iavl/tree_test.go b/store/v2/commitment/iavl/tree_test.go deleted file mode 100644 index 7435b4e58d..0000000000 --- a/store/v2/commitment/iavl/tree_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package iavl - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/commitment" - dbm "cosmossdk.io/store/v2/db" -) - -func TestCommitterSuite(t *testing.T) { - s := &commitment.CommitStoreTestSuite{ - NewStore: func(db corestore.KVStoreWithBatch, storeKeys []string, logger corelog.Logger) (*commitment.CommitStore, error) { - multiTrees := make(map[string]commitment.Tree) - cfg := DefaultConfig() - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) - multiTrees[storeKey] = NewIavlTree(prefixDB, logger, cfg) - } - return commitment.NewCommitStore(multiTrees, db, logger) - }, - } - - suite.Run(t, s) -} - -func generateTree() *IavlTree { - cfg := DefaultConfig() - db := dbm.NewMemDB() - return NewIavlTree(db, coretesting.NewNopLogger(), cfg) -} - -func TestIavlTree(t *testing.T) { - // generate a new tree - tree := generateTree() - require.NotNil(t, tree) - - initVersion := tree.GetLatestVersion() - require.Equal(t, uint64(0), initVersion) - - // write a batch of version 1 - require.NoError(t, tree.Set([]byte("key1"), []byte("value1"))) - require.NoError(t, tree.Set([]byte("key2"), []byte("value2"))) - require.NoError(t, tree.Set([]byte("key3"), []byte("value3"))) - - workingHash := tree.WorkingHash() - require.NotNil(t, workingHash) - require.Equal(t, uint64(0), tree.GetLatestVersion()) - - // commit the batch - commitHash, version, err := tree.Commit() - require.NoError(t, err) - require.Equal(t, version, uint64(1)) - require.Equal(t, workingHash, commitHash) - require.Equal(t, uint64(1), tree.GetLatestVersion()) - - // ensure we can get expected values - bz, err := tree.Get(1, []byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), bz) - - bz, err = tree.Get(2, []byte("key1")) - require.Error(t, err) - require.Nil(t, bz) - - // write a batch of version 2 - require.NoError(t, tree.Set([]byte("key4"), []byte("value4"))) - require.NoError(t, tree.Set([]byte("key5"), []byte("value5"))) - require.NoError(t, tree.Set([]byte("key6"), []byte("value6"))) - require.NoError(t, tree.Remove([]byte("key1"))) // delete key1 - version2Hash := tree.WorkingHash() - require.NotNil(t, version2Hash) - commitHash, version, err = tree.Commit() - require.NoError(t, err) - require.Equal(t, version, uint64(2)) - require.Equal(t, version2Hash, commitHash) - - // get proof for key1 - proof, err := tree.GetProof(1, []byte("key1")) - require.NoError(t, err) - require.NotNil(t, proof.GetExist()) - - proof, err = tree.GetProof(2, []byte("key1")) - require.NoError(t, err) - require.NotNil(t, proof.GetNonexist()) - - // write a batch of version 3 - require.NoError(t, tree.Set([]byte("key7"), []byte("value7"))) - require.NoError(t, tree.Set([]byte("key8"), []byte("value8"))) - require.NoError(t, err) - _, _, err = tree.Commit() - require.NoError(t, err) - - // prune version 1 - err = tree.Prune(1) - require.NoError(t, err) - require.Equal(t, uint64(3), tree.GetLatestVersion()) - // async pruning check - checkErr := func() bool { - if _, err := tree.tree.LoadVersion(1); err != nil { - 
return true - } - return false - } - require.Eventually(t, checkErr, 2*time.Second, 100*time.Millisecond) - - // load version 2 - err = tree.LoadVersion(2) - require.NoError(t, err) - require.Equal(t, version2Hash, tree.WorkingHash()) - - // close the db - require.NoError(t, tree.Close()) -} diff --git a/store/v2/commitment/mem/tree.go b/store/v2/commitment/mem/tree.go deleted file mode 100644 index 34f26e6a28..0000000000 --- a/store/v2/commitment/mem/tree.go +++ /dev/null @@ -1,67 +0,0 @@ -package mem - -import ( - ics23 "github.com/cosmos/ics23/go" - - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/db" -) - -var _ commitment.Tree = (*Tree)(nil) - -// Tree is a simple in-memory implementation of commitment.Tree. -type Tree struct { - *db.MemDB -} - -func (t *Tree) Remove(key []byte) error { - return t.MemDB.Delete(key) -} - -func (t *Tree) GetLatestVersion() uint64 { - return 0 -} - -func (t *Tree) Hash() []byte { - return nil -} - -func (t *Tree) WorkingHash() []byte { - return nil -} - -func (t *Tree) LoadVersion(version uint64) error { - return nil -} - -func (t *Tree) Commit() ([]byte, uint64, error) { - return nil, 0, nil -} - -func (t *Tree) SetInitialVersion(version uint64) error { - return nil -} - -func (t *Tree) GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) { - return nil, nil -} - -func (t *Tree) Get(version uint64, key []byte) ([]byte, error) { - return t.MemDB.Get(key) -} - -func (t *Tree) Prune(version uint64) error { - return nil -} - -func (t *Tree) Export(version uint64) (commitment.Exporter, error) { - return nil, nil -} - -func (t *Tree) Import(version uint64) (commitment.Importer, error) { - return nil, nil -} - -func New() *Tree { - return &Tree{MemDB: db.NewMemDB()} -} diff --git a/store/v2/commitment/metadata.go b/store/v2/commitment/metadata.go deleted file mode 100644 index 6b98ef2257..0000000000 --- a/store/v2/commitment/metadata.go +++ /dev/null @@ -1,102 +0,0 @@ -package commitment - -import ( - "bytes" - "fmt" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/internal/encoding" - "cosmossdk.io/store/v2/proof" -) - -const ( - commitInfoKeyFmt = "c/%d" // c/ - latestVersionKey = "c/latest" -) - -type MetadataStore struct { - kv corestore.KVStoreWithBatch -} - -func NewMetadataStore(kv corestore.KVStoreWithBatch) *MetadataStore { - return &MetadataStore{ - kv: kv, - } -} - -func (m *MetadataStore) GetLatestVersion() (uint64, error) { - value, err := m.kv.Get([]byte(latestVersionKey)) - if err != nil { - return 0, err - } - if value == nil { - return 0, nil - } - - version, _, err := encoding.DecodeUvarint(value) - if err != nil { - return 0, err - } - - return version, nil -} - -func (m *MetadataStore) GetCommitInfo(version uint64) (*proof.CommitInfo, error) { - key := []byte(fmt.Sprintf(commitInfoKeyFmt, version)) - value, err := m.kv.Get(key) - if err != nil { - return nil, err - } - if value == nil { - return nil, nil - } - - cInfo := &proof.CommitInfo{} - if err := cInfo.Unmarshal(value); err != nil { - return nil, err - } - - return cInfo, nil -} - -func (m *MetadataStore) flushCommitInfo(version uint64, cInfo *proof.CommitInfo) (err error) { - // do nothing if commit info is nil, as will be the case for an empty, initializing store - if cInfo == nil { - return nil - } - - batch := m.kv.NewBatch() - defer func() { - cErr := batch.Close() - if err == nil { - err = cErr - } - }() - cInfoKey := []byte(fmt.Sprintf(commitInfoKeyFmt, version)) - value, err := cInfo.Marshal() - if err != nil { - return err - } - if 
err := batch.Set(cInfoKey, value); err != nil { - return err - } - - var buf bytes.Buffer - buf.Grow(encoding.EncodeUvarintSize(version)) - if err := encoding.EncodeUvarint(&buf, version); err != nil { - return err - } - if err := batch.Set([]byte(latestVersionKey), buf.Bytes()); err != nil { - return err - } - - if err := batch.WriteSync(); err != nil { - return err - } - return nil -} - -func (m *MetadataStore) deleteCommitInfo(version uint64) error { - cInfoKey := []byte(fmt.Sprintf(commitInfoKeyFmt, version)) - return m.kv.Delete(cInfoKey) -} diff --git a/store/v2/commitment/store.go b/store/v2/commitment/store.go deleted file mode 100644 index d155d11a65..0000000000 --- a/store/v2/commitment/store.go +++ /dev/null @@ -1,413 +0,0 @@ -package commitment - -import ( - "errors" - "fmt" - "io" - "math" - - protoio "github.com/cosmos/gogoproto/io" - - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/internal" - "cosmossdk.io/store/v2/internal/conv" - "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -var ( - _ store.Committer = (*CommitStore)(nil) - _ snapshots.CommitSnapshotter = (*CommitStore)(nil) - _ store.PausablePruner = (*CommitStore)(nil) -) - -// CommitStore is a wrapper around multiple Tree objects mapped by a unique store -// key. Each store key reflects dedicated and unique usage within a module. A caller -// can construct a CommitStore with one or more store keys. It is expected that a -// RootStore use a CommitStore as an abstraction to handle multiple store keys -// and trees. -type CommitStore struct { - logger corelog.Logger - metadata *MetadataStore - multiTrees map[string]Tree -} - -// NewCommitStore creates a new CommitStore instance. -func NewCommitStore(trees map[string]Tree, db corestore.KVStoreWithBatch, logger corelog.Logger) (*CommitStore, error) { - return &CommitStore{ - logger: logger, - multiTrees: trees, - metadata: NewMetadataStore(db), - }, nil -} - -func (c *CommitStore) WriteChangeset(cs *corestore.Changeset) error { - for _, pairs := range cs.Changes { - - key := conv.UnsafeBytesToStr(pairs.Actor) - - tree, ok := c.multiTrees[key] - if !ok { - return fmt.Errorf("store key %s not found in multiTrees", key) - } - for _, kv := range pairs.StateChanges { - if kv.Remove { - if err := tree.Remove(kv.Key); err != nil { - return err - } - } else if err := tree.Set(kv.Key, kv.Value); err != nil { - return err - } - } - } - - return nil -} - -func (c *CommitStore) WorkingCommitInfo(version uint64) *proof.CommitInfo { - storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees)) - for storeKey, tree := range c.multiTrees { - if internal.IsMemoryStoreKey(storeKey) { - continue - } - bz := []byte(storeKey) - storeInfos = append(storeInfos, proof.StoreInfo{ - Name: bz, - CommitID: proof.CommitID{ - Version: version, - Hash: tree.WorkingHash(), - }, - }) - } - - return &proof.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } -} - -func (c *CommitStore) LoadVersion(targetVersion uint64) error { - // Rollback the metadata to the target version. 
- latestVersion, err := c.GetLatestVersion() - if err != nil { - return err - } - if targetVersion < latestVersion { - for version := latestVersion; version > targetVersion; version-- { - if err = c.metadata.deleteCommitInfo(version); err != nil { - return err - } - } - } - - for _, tree := range c.multiTrees { - if err := tree.LoadVersion(targetVersion); err != nil { - return err - } - } - - // If the target version is greater than the latest version, it is the snapshot - // restore case, we should create a new commit info for the target version. - var cInfo *proof.CommitInfo - if targetVersion > latestVersion { - cInfo = c.WorkingCommitInfo(targetVersion) - } - - return c.metadata.flushCommitInfo(targetVersion, cInfo) -} - -func (c *CommitStore) Commit(version uint64) (*proof.CommitInfo, error) { - storeInfos := make([]proof.StoreInfo, 0, len(c.multiTrees)) - - for storeKey, tree := range c.multiTrees { - if internal.IsMemoryStoreKey(storeKey) { - continue - } - // If a commit event execution is interrupted, a new iavl store's version - // will be larger than the RMS's metadata, when the block is replayed, we - // should avoid committing that iavl store again. - var commitID proof.CommitID - if tree.GetLatestVersion() >= version { - commitID.Version = version - commitID.Hash = tree.Hash() - } else { - hash, cversion, err := tree.Commit() - if err != nil { - return nil, err - } - if cversion != version { - return nil, fmt.Errorf("commit version %d does not match the target version %d", cversion, version) - } - commitID = proof.CommitID{ - Version: version, - Hash: hash, - } - } - storeInfos = append(storeInfos, proof.StoreInfo{ - Name: []byte(storeKey), - CommitID: commitID, - }) - } - - cInfo := &proof.CommitInfo{ - Version: version, - StoreInfos: storeInfos, - } - - if err := c.metadata.flushCommitInfo(version, cInfo); err != nil { - return nil, err - } - - return cInfo, nil -} - -func (c *CommitStore) SetInitialVersion(version uint64) error { - for _, tree := range c.multiTrees { - if err := tree.SetInitialVersion(version); err != nil { - return err - } - } - - return nil -} - -func (c *CommitStore) GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) { - tree, ok := c.multiTrees[conv.UnsafeBytesToStr(storeKey)] - if !ok { - return nil, fmt.Errorf("store %s not found", storeKey) - } - - iProof, err := tree.GetProof(version, key) - if err != nil { - return nil, err - } - cInfo, err := c.metadata.GetCommitInfo(version) - if err != nil { - return nil, err - } - if cInfo == nil { - return nil, fmt.Errorf("commit info not found for version %d", version) - } - commitOp := proof.NewIAVLCommitmentOp(key, iProof) - _, storeCommitmentOp, err := cInfo.GetStoreProof(storeKey) - if err != nil { - return nil, err - } - - return []proof.CommitmentOp{commitOp, *storeCommitmentOp}, nil -} - -func (c *CommitStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - tree, ok := c.multiTrees[conv.UnsafeBytesToStr(storeKey)] - if !ok { - return nil, fmt.Errorf("store %s not found", storeKey) - } - - bz, err := tree.Get(version, key) - if err != nil { - return nil, fmt.Errorf("failed to get key %s from store %s: %w", key, storeKey, err) - } - - return bz, nil -} - -// Prune implements store.Pruner. 
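The pruning hook pairs naturally with a pruning policy. A sketch of the commit-then-maybe-prune cadence, mirroring the pruning test that appears later in this diff; it assumes a `*commitment.CommitStore` whose changesets have already been written per version, and a policy keeping 10 recent versions with a pruning interval of 5:

```go
package main

import (
	"cosmossdk.io/store/v2"
	"cosmossdk.io/store/v2/commitment"
)

// pruneLoop commits versions 1..latest and prunes on the policy's cadence.
func pruneLoop(commitStore *commitment.CommitStore, latest uint64) error {
	opts := store.NewPruningOptionWithCustom(10, 5) // keep-recent=10, interval=5
	for v := uint64(1); v <= latest; v++ {
		if _, err := commitStore.Commit(v); err != nil {
			return err
		}
		if prune, pruneVersion := opts.ShouldPrune(v); prune {
			if err := commitStore.Prune(pruneVersion); err != nil {
				return err
			}
		}
	}
	return nil
}
```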
-func (c *CommitStore) Prune(version uint64) (ferr error) { - // prune the metadata - for v := version; v > 0; v-- { - if err := c.metadata.deleteCommitInfo(v); err != nil { - return err - } - } - - for _, tree := range c.multiTrees { - if err := tree.Prune(version); err != nil { - ferr = errors.Join(ferr, err) - } - } - - return ferr -} - -// PausePruning implements store.PausablePruner. -func (c *CommitStore) PausePruning(pause bool) { - for _, tree := range c.multiTrees { - if pruner, ok := tree.(store.PausablePruner); ok { - pruner.PausePruning(pause) - } - } -} - -// Snapshot implements snapshotstypes.CommitSnapshotter. -func (c *CommitStore) Snapshot(version uint64, protoWriter protoio.Writer) error { - if version == 0 { - return fmt.Errorf("the snapshot version must be greater than 0") - } - - latestVersion, err := c.GetLatestVersion() - if err != nil { - return err - } - if version > latestVersion { - return fmt.Errorf("the snapshot version %d is greater than the latest version %d", version, latestVersion) - } - - for storeKey, tree := range c.multiTrees { - // TODO: check the parallelism of this loop - if err := func() error { - exporter, err := tree.Export(version) - if err != nil { - return fmt.Errorf("failed to export tree for version %d: %w", version, err) - } - defer exporter.Close() - - err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Store{ - Store: &snapshotstypes.SnapshotStoreItem{ - Name: storeKey, - }, - }, - }) - if err != nil { - return fmt.Errorf("failed to write store name: %w", err) - } - - for { - item, err := exporter.Next() - if errors.Is(err, ErrorExportDone) { - break - } else if err != nil { - return fmt.Errorf("failed to get the next export node: %w", err) - } - - if err = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_IAVL{ - IAVL: item, - }, - }); err != nil { - return fmt.Errorf("failed to write iavl node: %w", err) - } - } - - return nil - }(); err != nil { - return err - } - } - - return nil -} - -// Restore implements snapshotstypes.CommitSnapshotter. 
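`Snapshot` and `Restore` communicate over a protobuf stream plus a channel of state changes that an SS backend can index. A condensed sketch of that plumbing, following the test suite later in this diff; `source` and `target` are assumed to be `*commitment.CommitStore` values and `latest` a committed version, with error handling in the producer goroutine elided:

```go
package main

import (
	"io"

	corestore "cosmossdk.io/core/store"
	"cosmossdk.io/store/v2/commitment"
	"cosmossdk.io/store/v2/snapshots"
	snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
)

func restoreFromSnapshot(source, target *commitment.CommitStore, latest uint64) error {
	chunks := make(chan io.ReadCloser, 32)

	// Producer: stream the snapshot of `source` into the chunk channel.
	go func() {
		streamWriter := snapshots.NewStreamWriter(chunks)
		defer streamWriter.Close()
		_ = source.Snapshot(latest, streamWriter) // sketch: error handling elided
	}()

	streamReader, err := snapshots.NewStreamReader(chunks)
	if err != nil {
		return err
	}

	// Consumer: leaf nodes are forwarded here so an SS backend could index them.
	chStorage := make(chan *corestore.StateChanges, 100)
	go func() {
		for range chStorage {
		}
	}()

	_, err = target.Restore(latest, snapshotstypes.CurrentFormat, streamReader, chStorage)
	close(chStorage)
	return err
}
```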
-func (c *CommitStore) Restore( - version uint64, - format uint32, - protoReader protoio.Reader, - chStorage chan<- *corestore.StateChanges, -) (snapshotstypes.SnapshotItem, error) { - var ( - importer Importer - snapshotItem snapshotstypes.SnapshotItem - storeKey []byte - ) - -loop: - for { - snapshotItem = snapshotstypes.SnapshotItem{} - err := protoReader.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("invalid protobuf message: %w", err) - } - - switch item := snapshotItem.Item.(type) { - case *snapshotstypes.SnapshotItem_Store: - if importer != nil { - if err := importer.Commit(); err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err) - } - if err := importer.Close(); err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to close importer: %w", err) - } - } - - storeKey = []byte(item.Store.Name) - tree := c.multiTrees[item.Store.Name] - if tree == nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("store %s not found", item.Store.Name) - } - importer, err = tree.Import(version) - if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to import tree for version %d: %w", version, err) - } - defer importer.Close() - - case *snapshotstypes.SnapshotItem_IAVL: - if importer == nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("received IAVL node item before store item") - } - node := item.IAVL - if node.Height > int32(math.MaxInt8) { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("node height %v cannot exceed %v", - item.IAVL.Height, math.MaxInt8) - } - // Protobuf does not differentiate between []byte{} and nil, but fortunately IAVL does - // not allow nil keys nor nil values for leaf nodes, so we can always set them to empty. - if node.Key == nil { - node.Key = []byte{} - } - if node.Height == 0 { - if node.Value == nil { - node.Value = []byte{} - } - - // If the node is a leaf node, it will be written to the storage. 
- chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: node.Key, - Value: node.Value, - }, - }, - } - } - err := importer.Add(node) - if err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to add node to importer: %w", err) - } - default: - break loop - } - } - - if importer != nil { - if err := importer.Commit(); err != nil { - return snapshotstypes.SnapshotItem{}, fmt.Errorf("failed to commit importer: %w", err) - } - } - - return snapshotItem, c.LoadVersion(version) -} - -func (c *CommitStore) GetCommitInfo(version uint64) (*proof.CommitInfo, error) { - return c.metadata.GetCommitInfo(version) -} - -func (c *CommitStore) GetLatestVersion() (uint64, error) { - return c.metadata.GetLatestVersion() -} - -func (c *CommitStore) Close() (ferr error) { - for _, tree := range c.multiTrees { - if err := tree.Close(); err != nil { - ferr = errors.Join(ferr, err) - } - } - - return ferr -} diff --git a/store/v2/commitment/store_bench_test.go b/store/v2/commitment/store_bench_test.go deleted file mode 100644 index 037d211ee7..0000000000 --- a/store/v2/commitment/store_bench_test.go +++ /dev/null @@ -1,129 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package commitment_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" -) - -var ( - storeKeys = []string{"store1", "store2", "store3"} - dbBackends = map[string]func(dataDir string) (corestore.KVStoreWithBatch, error){ - "rocksdb_opts": func(dataDir string) (corestore.KVStoreWithBatch, error) { - return dbm.NewRocksDB("test", dataDir) - }, - "pebbledb_opts": func(dataDir string) (corestore.KVStoreWithBatch, error) { - return dbm.NewPebbleDB("test", dataDir) - }, - "goleveldb_opts": func(dataDir string) (corestore.KVStoreWithBatch, error) { - return dbm.NewGoLevelDB("test", dataDir, nil) - }, - } - rng = rand.New(rand.NewSource(543210)) - changesets = make([]*corestore.Changeset, 1000) -) - -func init() { - for i := 0; i < 1000; i++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for j := 0; j < 100; j++ { - key := make([]byte, 16) - val := make([]byte, 16) - - _, err := rng.Read(key) - if err != nil { - panic(err) - } - _, err = rng.Read(val) - if err != nil { - panic(err) - } - - cs.AddKVPair([]byte(storeKey), corestore.KVPair{Key: key, Value: val}) - } - } - changesets[i] = cs - } -} - -func getCommitStore(b *testing.B, db corestore.KVStoreWithBatch) *commitment.CommitStore { - b.Helper() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) - } - - sc, err := commitment.NewCommitStore(multiTrees, db, coretesting.NewNopLogger()) - require.NoError(b, err) - - return sc -} - -func BenchmarkCommit(b *testing.B) { - for ty, fn := range dbBackends { - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - b.StopTimer() - for i := 0; i < b.N; i++ { - db, err := fn(b.TempDir()) - require.NoError(b, err) - sc := getCommitStore(b, db) - b.StartTimer() - for j, cs := range changesets { - require.NoError(b, sc.WriteChangeset(cs)) - _, err := sc.Commit(uint64(j + 1)) - require.NoError(b, err) - } - 
b.StopTimer() - require.NoError(b, db.Close()) - } - }) - } -} - -func BenchmarkGetProof(b *testing.B) { - for ty, fn := range dbBackends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - sc := getCommitStore(b, db) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - b.StopTimer() - // commit some changesets - for i, cs := range changesets { - require.NoError(b, sc.WriteChangeset(cs)) - _, err = sc.Commit(uint64(i + 1)) - require.NoError(b, err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - // non-existing proof - p, err := sc.GetProof([]byte(storeKeys[0]), 500, []byte("key-1-1")) - require.NoError(b, err) - require.NotNil(b, p) - // existing proof - p, err = sc.GetProof([]byte(storeKeys[1]), 500, changesets[499].Changes[1].StateChanges[1].Key) - require.NoError(b, err) - require.NotNil(b, p) - } - }) - require.NoError(b, db.Close()) - } -} diff --git a/store/v2/commitment/store_test_suite.go b/store/v2/commitment/store_test_suite.go deleted file mode 100644 index 7e8c3587d3..0000000000 --- a/store/v2/commitment/store_test_suite.go +++ /dev/null @@ -1,166 +0,0 @@ -package commitment - -import ( - "bytes" - "fmt" - "io" - "sync" - - "github.com/stretchr/testify/suite" - - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -const ( - storeKey1 = "store1" - storeKey2 = "store2" -) - -// CommitStoreTestSuite is a test suite to be used for all tree backends. -type CommitStoreTestSuite struct { - suite.Suite - - NewStore func(db corestore.KVStoreWithBatch, storeKeys []string, logger corelog.Logger) (*CommitStore, error) -} - -func (s *CommitStoreTestSuite) TestStore_Snapshotter() { - storeKeys := []string{storeKey1, storeKey2} - commitStore, err := s.NewStore(dbm.NewMemDB(), storeKeys, coretesting.NewNopLogger()) - s.Require().NoError(err) - - latestVersion := uint64(10) - kvCount := 10 - for i := uint64(1); i <= latestVersion; i++ { - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(kvPairs))) - - _, err = commitStore.Commit(i) - s.Require().NoError(err) - } - - cInfo := commitStore.WorkingCommitInfo(latestVersion) - s.Require().Equal(len(storeKeys), len(cInfo.StoreInfos)) - - // create a snapshot - dummyExtensionItem := snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Extension{ - Extension: &snapshotstypes.SnapshotExtensionMeta{ - Name: "test", - Format: 1, - }, - }, - } - - targetStore, err := s.NewStore(dbm.NewMemDB(), storeKeys, coretesting.NewNopLogger()) - s.Require().NoError(err) - - chunks := make(chan io.ReadCloser, kvCount*int(latestVersion)) - go func() { - streamWriter := snapshots.NewStreamWriter(chunks) - s.Require().NotNil(streamWriter) - defer streamWriter.Close() - err := commitStore.Snapshot(latestVersion, streamWriter) - s.Require().NoError(err) - // write an extension metadata - err = streamWriter.WriteMsg(&dummyExtensionItem) - s.Require().NoError(err) - }() - - streamReader, err := 
snapshots.NewStreamReader(chunks) - s.Require().NoError(err) - chStorage := make(chan *corestore.StateChanges, 100) - leaves := make(map[string]string) - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - for kv := range chStorage { - for _, actor := range kv.StateChanges { - leaves[fmt.Sprintf("%s_%s", kv.Actor, actor.Key)] = string(actor.Value) - } - } - wg.Done() - }() - nextItem, err := targetStore.Restore(latestVersion, snapshotstypes.CurrentFormat, streamReader, chStorage) - s.Require().NoError(err) - s.Require().Equal(*dummyExtensionItem.GetExtension(), *nextItem.GetExtension()) - - close(chStorage) - wg.Wait() - s.Require().Equal(len(storeKeys)*kvCount*int(latestVersion), len(leaves)) - for _, storeKey := range storeKeys { - for i := 1; i <= int(latestVersion); i++ { - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("%s_key-%d-%d", storeKey, i, j) - s.Require().Equal(leaves[key], fmt.Sprintf("value-%d-%d", i, j)) - } - } - } - - // check the restored tree hash - targetCommitInfo := targetStore.WorkingCommitInfo(latestVersion) - for _, storeInfo := range targetCommitInfo.StoreInfos { - matched := false - for _, latestStoreInfo := range cInfo.StoreInfos { - if bytes.Equal(storeInfo.Name, latestStoreInfo.Name) { - s.Require().Equal(latestStoreInfo.GetHash(), storeInfo.GetHash()) - matched = true - } - } - s.Require().True(matched) - } -} - -func (s *CommitStoreTestSuite) TestStore_Pruning() { - storeKeys := []string{storeKey1, storeKey2} - pruneOpts := store.NewPruningOptionWithCustom(10, 5) - commitStore, err := s.NewStore(dbm.NewMemDB(), storeKeys, coretesting.NewNopLogger()) - s.Require().NoError(err) - - latestVersion := uint64(100) - kvCount := 10 - for i := uint64(1); i <= latestVersion; i++ { - kvPairs := make(map[string]corestore.KVPairs) - for _, storeKey := range storeKeys { - kvPairs[storeKey] = corestore.KVPairs{} - for j := 0; j < kvCount; j++ { - key := []byte(fmt.Sprintf("key-%d-%d", i, j)) - value := []byte(fmt.Sprintf("value-%d-%d", i, j)) - kvPairs[storeKey] = append(kvPairs[storeKey], corestore.KVPair{Key: key, Value: value}) - } - } - s.Require().NoError(commitStore.WriteChangeset(corestore.NewChangesetWithPairs(kvPairs))) - - _, err = commitStore.Commit(i) - s.Require().NoError(err) - - if prune, pruneVersion := pruneOpts.ShouldPrune(i); prune { - s.Require().NoError(commitStore.Prune(pruneVersion)) - } - - } - - pruneVersion := latestVersion - pruneOpts.KeepRecent - 1 - // check the store - for i := uint64(1); i <= latestVersion; i++ { - commitInfo, _ := commitStore.GetCommitInfo(i) - if i <= pruneVersion { - s.Require().Nil(commitInfo) - } else { - s.Require().NotNil(commitInfo) - } - } -} diff --git a/store/v2/commitment/tree.go b/store/v2/commitment/tree.go deleted file mode 100644 index 54fe2d60f0..0000000000 --- a/store/v2/commitment/tree.go +++ /dev/null @@ -1,58 +0,0 @@ -package commitment - -import ( - "errors" - "io" - - ics23 "github.com/cosmos/ics23/go" - - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -// ErrorExportDone is returned by Exporter.Next() when all items have been exported. -var ErrorExportDone = errors.New("export is complete") - -// Tree is the interface that wraps the basic Tree methods. -type Tree interface { - Set(key, value []byte) error - Remove(key []byte) error - GetLatestVersion() uint64 - - // Hash returns the hash of the latest saved version of the tree. - Hash() []byte - - // WorkingHash returns the working hash of the tree. 
- WorkingHash() []byte - - LoadVersion(version uint64) error - Commit() ([]byte, uint64, error) - SetInitialVersion(version uint64) error - GetProof(version uint64, key []byte) (*ics23.CommitmentProof, error) - - // Get attempts to retrieve a value from the tree for a given version. - // - // NOTE: This method only exists to support migration from IAVL v0/v1 to v2. - // Once migration is complete, this method should be removed and/or not used. - Get(version uint64, key []byte) ([]byte, error) - - Prune(version uint64) error - Export(version uint64) (Exporter, error) - Import(version uint64) (Importer, error) - - io.Closer -} - -// Exporter is the interface that wraps the basic Export methods. -type Exporter interface { - Next() (*snapshotstypes.SnapshotIAVLItem, error) - - io.Closer -} - -// Importer is the interface that wraps the basic Import methods. -type Importer interface { - Add(*snapshotstypes.SnapshotIAVLItem) error - Commit() error - - io.Closer -} diff --git a/store/v2/database.go b/store/v2/database.go deleted file mode 100644 index a0466de18d..0000000000 --- a/store/v2/database.go +++ /dev/null @@ -1,63 +0,0 @@ -package store - -import ( - "io" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/proof" -) - -// VersionedDatabase defines an API for a versioned database that allows reads, -// writes, iteration and commitment over a series of versions. -type VersionedDatabase interface { - Has(storeKey []byte, version uint64, key []byte) (bool, error) - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - GetLatestVersion() (uint64, error) - SetLatestVersion(version uint64) error - - Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - - ApplyChangeset(version uint64, cs *corestore.Changeset) error - - // Close releases associated resources. It should NOT be idempotent. It must - // only be called once and any call after may panic. - io.Closer -} - -// Committer defines an API for committing state. -type Committer interface { - // WriteChangeset writes the changeset to the commitment state. - WriteChangeset(cs *corestore.Changeset) error - - // WorkingCommitInfo returns the CommitInfo for the working tree. - WorkingCommitInfo(version uint64) *proof.CommitInfo - - // GetLatestVersion returns the latest version. - GetLatestVersion() (uint64, error) - - // LoadVersion loads the tree at the given version. - LoadVersion(targetVersion uint64) error - - // Commit commits the working tree to the database. - Commit(version uint64) (*proof.CommitInfo, error) - - // GetProof returns the proof of existence or non-existence for the given key. - GetProof(storeKey []byte, version uint64, key []byte) ([]proof.CommitmentOp, error) - - // Get returns the value for the given key at the given version. - // - // NOTE: This method only exists to support migration from IAVL v0/v1 to v2. - // Once migration is complete, this method should be removed and/or not used. - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - - // SetInitialVersion sets the initial version of the tree. - SetInitialVersion(version uint64) error - - // GetCommitInfo returns the CommitInfo for the given version. - GetCommitInfo(version uint64) (*proof.CommitInfo, error) - - // Close releases associated resources. It should NOT be idempotent. It must - // only be called once and any call after may panic. 
- io.Closer -} diff --git a/store/v2/db/db.go b/store/v2/db/db.go deleted file mode 100644 index a8c741e454..0000000000 --- a/store/v2/db/db.go +++ /dev/null @@ -1,31 +0,0 @@ -package db - -import ( - "fmt" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -type DBType string - -const ( - DBTypeGoLevelDB DBType = "goleveldb" - DBTypeRocksDB DBType = "rocksdb" - DBTypePebbleDB DBType = "pebbledb" - DBTypePrefixDB DBType = "prefixdb" - - DBFileSuffix string = ".db" -) - -func NewDB(dbType DBType, name, dataDir string, opts store.DBOptions) (corestore.KVStoreWithBatch, error) { - switch dbType { - case DBTypeGoLevelDB: - return NewGoLevelDB(name, dataDir, opts) - - case DBTypePebbleDB: - return NewPebbleDB(name, dataDir) - } - - return nil, fmt.Errorf("unsupported db type: %s", dbType) -} diff --git a/store/v2/db/db_test.go b/store/v2/db/db_test.go deleted file mode 100644 index 913fdb3243..0000000000 --- a/store/v2/db/db_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package db - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" -) - -type DBTestSuite struct { - suite.Suite - - db corestore.KVStoreWithBatch -} - -func (s *DBTestSuite) TearDownSuite() { - s.Require().NoError(s.db.Close()) -} - -func (s *DBTestSuite) TestDBOperations() { - // Batch Set - b := s.db.NewBatch() - s.Require().NoError(b.Set([]byte("key"), []byte("value"))) - s.Require().NoError(b.Set([]byte("key1"), []byte("value1"))) - s.Require().NoError(b.Set([]byte("key2"), []byte("value2"))) - s.Require().NoError(b.Write()) - - // Get - value, err := s.db.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value"), value) - - // Has - has, err := s.db.Has([]byte("key1")) - s.Require().NoError(err) - s.Require().True(has) - has, err = s.db.Has([]byte("key3")) - s.Require().NoError(err) - s.Require().False(has) - - // Batch Delete - b = s.db.NewBatch() - s.Require().NoError(b.Delete([]byte("key1"))) - s.Require().NoError(b.Write()) - - // Has - has, err = s.db.Has([]byte("key1")) - s.Require().NoError(err) - s.Require().False(has) - - // Set & Delete - s.Require().NoError(s.db.Set([]byte("key3"), []byte("value3"))) - has, err = s.db.Has([]byte("key3")) - s.Require().NoError(err) - s.Require().True(has) - value, err = s.db.Get([]byte("key3")) - s.Require().NoError(err) - s.Require().Equal([]byte("value3"), value) - s.Require().NoError(s.db.Delete([]byte("key3"))) - has, err = s.db.Has([]byte("key3")) - s.Require().NoError(err) - s.Require().False(has) - value, err = s.db.Get([]byte("key3")) - s.Require().NoError(err) - s.Require().Nil(value) -} - -func (s *DBTestSuite) TestIterator() { - // Set - b := s.db.NewBatch() - for i := 0; i < 10; i++ { - s.Require().NoError(b.Set([]byte(fmt.Sprintf("key%d", i)), []byte(fmt.Sprintf("value%d", i)))) - } - s.Require().NoError(b.Write()) - - // Iterator - itr, err := s.db.Iterator(nil, nil) - s.Require().NoError(err) - defer itr.Close() - - for ; itr.Valid(); itr.Next() { - key := itr.Key() - value := itr.Value() - value1, err := s.db.Get(key) - s.Require().NoError(err) - s.Require().Equal(value1, value) - } - - // Reverse Iterator - ritr, err := s.db.ReverseIterator([]byte("key0"), []byte("keys")) - s.Require().NoError(err) - defer ritr.Close() - - index := 9 - for ; ritr.Valid(); ritr.Next() { - key := ritr.Key() - value := ritr.Value() - s.Require().Equal([]byte(fmt.Sprintf("key%d", index)), key) - value1, err := s.db.Get(key) - s.Require().NoError(err) 
- s.Require().Equal(value1, value) - index -= 1 - } - s.Require().Equal(-1, index) -} - -func TestMemDBSuite(t *testing.T) { - suite.Run(t, &DBTestSuite{ - db: NewMemDB(), - }) -} - -func TestPebbleDBSuite(t *testing.T) { - db, err := NewPebbleDB("test", t.TempDir()) - require.NoError(t, err) - - suite.Run(t, &DBTestSuite{ - db: db, - }) -} - -func TestGoLevelDBSuite(t *testing.T) { - db, err := NewGoLevelDB("test", t.TempDir(), nil) - require.NoError(t, err) - suite.Run(t, &DBTestSuite{ - db: db, - }) -} - -func TestPrefixDBSuite(t *testing.T) { - suite.Run(t, &DBTestSuite{ - db: NewPrefixDB(NewMemDB(), []byte("prefix")), - }) -} diff --git a/store/v2/db/goleveldb.go b/store/v2/db/goleveldb.go deleted file mode 100644 index 59e2e08ad8..0000000000 --- a/store/v2/db/goleveldb.go +++ /dev/null @@ -1,405 +0,0 @@ -package db - -import ( - "bytes" - "errors" - "fmt" - "path/filepath" - - "github.com/spf13/cast" - "github.com/syndtr/goleveldb/leveldb" - dberrors "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/filter" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors "cosmossdk.io/store/v2/errors" -) - -var _ corestore.KVStoreWithBatch = (*GoLevelDB)(nil) - -// GoLevelDB implements corestore.KVStore using github.com/syndtr/goleveldb/leveldb. -// It is used for only store v2 migration, since some clients use goleveldb as -// the IAVL v0/v1 backend. -type GoLevelDB struct { - db *leveldb.DB -} - -func NewGoLevelDB(name, dir string, opts store.DBOptions) (*GoLevelDB, error) { - defaultOpts := &opt.Options{ - Filter: filter.NewBloomFilter(10), // by default, goleveldb doesn't use a bloom filter. - } - - if opts != nil { - files := cast.ToInt(opts.Get("maxopenfiles")) - if files > 0 { - defaultOpts.OpenFilesCacheCapacity = files - } - } - - return NewGoLevelDBWithOpts(name, dir, defaultOpts) -} - -func NewGoLevelDBWithOpts(name, dir string, o *opt.Options) (*GoLevelDB, error) { - dbPath := filepath.Join(dir, name+DBFileSuffix) - db, err := leveldb.OpenFile(dbPath, o) - if err != nil { - return nil, err - } - return &GoLevelDB{db: db}, nil -} - -// Get implements corestore.KVStore. -func (db *GoLevelDB) Get(key []byte) ([]byte, error) { - if len(key) == 0 { - return nil, storeerrors.ErrKeyEmpty - } - res, err := db.db.Get(key, nil) - if err != nil { - if errors.Is(err, dberrors.ErrNotFound) { - return nil, nil - } - return nil, err - } - return res, nil -} - -// Has implements corestore.KVStore. -func (db *GoLevelDB) Has(key []byte) (bool, error) { - return db.db.Has(key, nil) -} - -// Set implements corestore.KVStore. -func (db *GoLevelDB) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - return db.db.Put(key, value, nil) -} - -// SetSync implements corestore.KVStore. -func (db *GoLevelDB) SetSync(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - return db.db.Put(key, value, &opt.WriteOptions{Sync: true}) -} - -// Delete implements corestore.KVStore. -func (db *GoLevelDB) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - return db.db.Delete(key, nil) -} - -// DeleteSync implements corestore.KVStore. 
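Putting the `NewDB` factory from earlier in this diff together with the basic KV operations, a small usage sketch (the `"application"` name and data directory are hypothetical; `nil` options fall back to the defaults, since the constructors only consult `opts` when it is non-nil):

```go
package main

import (
	"fmt"

	"cosmossdk.io/store/v2/db"
)

func main() {
	kv, err := db.NewDB(db.DBTypeGoLevelDB, "application", "/tmp/data", nil)
	if err != nil {
		panic(err)
	}
	defer kv.Close()

	if err := kv.Set([]byte("key"), []byte("value")); err != nil {
		panic(err)
	}
	value, err := kv.Get([]byte("key"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", value)
}
```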
-func (db *GoLevelDB) DeleteSync(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - return db.db.Delete(key, &opt.WriteOptions{Sync: true}) -} - -func (db *GoLevelDB) RawDB() *leveldb.DB { - return db.db -} - -// Close implements corestore.KVStore. -func (db *GoLevelDB) Close() error { - return db.db.Close() -} - -// Print implements corestore.KVStore. -func (db *GoLevelDB) Print() error { - str, err := db.db.GetProperty("leveldb.stats") - if err != nil { - return err - } - fmt.Printf("%v\n", str) - - itr := db.db.NewIterator(nil, nil) - for itr.Next() { - key := itr.Key() - value := itr.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } - return nil -} - -// Stats implements corestore.KVStore. -func (db *GoLevelDB) Stats() map[string]string { - keys := []string{ - "leveldb.num-files-at-level{n}", - "leveldb.stats", - "leveldb.sstables", - "leveldb.blockpool", - "leveldb.cachedblock", - "leveldb.openedtables", - "leveldb.alivesnaps", - "leveldb.aliveiters", - } - - stats := make(map[string]string) - for _, key := range keys { - if str, err := db.db.GetProperty(key); err == nil { - stats[key] = str - } - } - return stats -} - -func (db *GoLevelDB) ForceCompact(start, limit []byte) error { - return db.db.CompactRange(util.Range{Start: start, Limit: limit}) -} - -// NewBatch implements corestore.BatchCreator. -func (db *GoLevelDB) NewBatch() corestore.Batch { - return newGoLevelDBBatch(db) -} - -// NewBatchWithSize implements corestore.BatchCreator. -func (db *GoLevelDB) NewBatchWithSize(size int) corestore.Batch { - return newGoLevelDBBatchWithSize(db, size) -} - -// Iterator implements corestore.KVStore. -func (db *GoLevelDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) - return newGoLevelDBIterator(itr, start, end, false), nil -} - -// ReverseIterator implements corestore.KVStore. -func (db *GoLevelDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - itr := db.db.NewIterator(&util.Range{Start: start, Limit: end}, nil) - return newGoLevelDBIterator(itr, start, end, true), nil -} - -type goLevelDBIterator struct { - source iterator.Iterator - start []byte - end []byte - isReverse bool - isInvalid bool -} - -var _ corestore.Iterator = (*goLevelDBIterator)(nil) - -func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { - if isReverse { - if end == nil { - source.Last() - } else { - if source.Seek(end) { - eoakey := source.Key() // end or after key - if bytes.Compare(end, eoakey) <= 0 { - source.Prev() - } - } else { - source.Last() - } - } - } else { - if start == nil { - source.First() - } else { - source.Seek(start) - } - } - return &goLevelDBIterator{ - source: source, - start: start, - end: end, - isReverse: isReverse, - isInvalid: false, - } -} - -// Domain implements Iterator. -func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -// Valid implements Iterator. -func (itr *goLevelDBIterator) Valid() bool { - // Once invalid, forever invalid. - if itr.isInvalid { - return false - } - - // If source errors, invalid. - if err := itr.Error(); err != nil { - itr.isInvalid = true - return false - } - - // If source is invalid, invalid. 
- if !itr.source.Valid() { - itr.isInvalid = true - return false - } - - // If key is end or past it, invalid. - start := itr.start - end := itr.end - key := itr.source.Key() - - if itr.isReverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.isInvalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.isInvalid = true - return false - } - } - - // Valid - return true -} - -// Key implements Iterator. -func (itr *goLevelDBIterator) Key() []byte { - // Key returns a copy of the current key. - // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 - itr.assertIsValid() - return cp(itr.source.Key()) -} - -// Value implements Iterator. -func (itr *goLevelDBIterator) Value() []byte { - // Value returns a copy of the current value. - // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 - itr.assertIsValid() - return cp(itr.source.Value()) -} - -// Next implements Iterator. -func (itr *goLevelDBIterator) Next() { - itr.assertIsValid() - if itr.isReverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -// Error implements Iterator. -func (itr *goLevelDBIterator) Error() error { - return itr.source.Error() -} - -// Close implements Iterator. -func (itr *goLevelDBIterator) Close() error { - itr.source.Release() - return nil -} - -func (itr goLevelDBIterator) assertIsValid() { - if !itr.Valid() { - panic("iterator is invalid") - } -} - -type goLevelDBBatch struct { - db *GoLevelDB - batch *leveldb.Batch -} - -var _ corestore.Batch = (*goLevelDBBatch)(nil) - -func newGoLevelDBBatch(db *GoLevelDB) *goLevelDBBatch { - return &goLevelDBBatch{ - db: db, - batch: new(leveldb.Batch), - } -} - -func newGoLevelDBBatchWithSize(db *GoLevelDB, size int) *goLevelDBBatch { - return &goLevelDBBatch{ - db: db, - batch: leveldb.MakeBatch(size), - } -} - -// Set implements corestore.Batch. -func (b *goLevelDBBatch) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - b.batch.Put(key, value) - return nil -} - -// Delete implements corestore.Batch. -func (b *goLevelDBBatch) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - b.batch.Delete(key) - return nil -} - -// Write implements corestore.Batch. -func (b *goLevelDBBatch) Write() error { - return b.write(false) -} - -// WriteSync implements corestore.Batch. -func (b *goLevelDBBatch) WriteSync() error { - return b.write(true) -} - -func (b *goLevelDBBatch) write(sync bool) error { - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - if err := b.db.db.Write(b.batch, &opt.WriteOptions{Sync: sync}); err != nil { - return err - } - // Make sure batch cannot be used afterwards. Callers should still call Close(), for errors. - return b.Close() -} - -// Close implements corestore.Batch. 
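One subtlety worth calling out: a successful `Write` closes the batch, so a batch is single-use, while `Close` remains safe to call afterwards. A usage sketch against any `corestore.KVStoreWithBatch` (a MemDB here, purely for a self-contained example):

```go
package main

import (
	corestore "cosmossdk.io/core/store"
	dbm "cosmossdk.io/store/v2/db"
)

func writeBatch(kv corestore.KVStoreWithBatch) error {
	b := kv.NewBatch()
	// Close releases the batch even if Write already consumed it.
	defer b.Close()

	if err := b.Set([]byte("a"), []byte("1")); err != nil {
		return err
	}
	if err := b.Delete([]byte("stale")); err != nil {
		return err
	}
	// Write consumes the batch; a second Write would fail with ErrBatchClosed.
	return b.Write()
}

func main() {
	if err := writeBatch(dbm.NewMemDB()); err != nil {
		panic(err)
	}
}
```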
-func (b *goLevelDBBatch) Close() error { - if b.batch != nil { - b.batch.Reset() - b.batch = nil - } - return nil -} - -// GetByteSize implements corestore.Batch -func (b *goLevelDBBatch) GetByteSize() (int, error) { - if b.batch == nil { - return 0, storeerrors.ErrBatchClosed - } - return len(b.batch.Dump()), nil -} diff --git a/store/v2/db/memdb.go b/store/v2/db/memdb.go deleted file mode 100644 index c6250db665..0000000000 --- a/store/v2/db/memdb.go +++ /dev/null @@ -1,465 +0,0 @@ -package db - -import ( - "bytes" - "context" - "fmt" - "sync" - - "github.com/google/btree" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/errors" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - bTreeDegree = 32 -) - -// item is a btree.Item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// Less implements btree.Item. -func (i item) Less(other btree.Item) bool { - // this considers nil == []byte{}, but that's ok since we handle nil endpoints - // in iterators specially anyway - return bytes.Compare(i.key, other.(item).key) == -1 -} - -// newKey creates a new key item. -func newKey(key []byte) item { - return item{key: key} -} - -// newPair creates a new pair item. -func newPair(key, value []byte) item { - return item{key: key, value: value} -} - -var _ corestore.KVStoreWithBatch = (*MemDB)(nil) - -// MemDB is an in-memory database backend using a B-tree for storage. -// -// For performance reasons, all given and returned keys and values are pointers to the in-memory -// database, so modifying them will cause the stored values to be modified as well. All DB methods -// already specify that keys and values should be considered read-only, but this is especially -// important with MemDB. -type MemDB struct { - mtx sync.RWMutex - btree *btree.BTree -} - -// NewMemDB creates a new in-memory database. -func NewMemDB() *MemDB { - database := &MemDB{ - btree: btree.New(bTreeDegree), - } - return database -} - -// Get implements DB. -func (db *MemDB) Get(key []byte) ([]byte, error) { - if len(key) == 0 { - return nil, errors.ErrKeyEmpty - } - db.mtx.RLock() - defer db.mtx.RUnlock() - - i := db.btree.Get(newKey(key)) - if i != nil { - return i.(item).value, nil - } - return nil, nil -} - -// Has implements DB. -func (db *MemDB) Has(key []byte) (bool, error) { - if len(key) == 0 { - return false, errors.ErrKeyEmpty - } - db.mtx.RLock() - defer db.mtx.RUnlock() - - return db.btree.Has(newKey(key)), nil -} - -// Set implements DB. -func (db *MemDB) Set(key, value []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - if value == nil { - return errors.ErrValueNil - } - db.mtx.Lock() - defer db.mtx.Unlock() - - db.set(key, value) - return nil -} - -// set sets a value without locking the mutex. -func (db *MemDB) set(key, value []byte) { - db.btree.ReplaceOrInsert(newPair(key, value)) -} - -// SetSync implements DB. -func (db *MemDB) SetSync(key, value []byte) error { - return db.Set(key, value) -} - -// Delete implements DB. -func (db *MemDB) Delete(key []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - db.mtx.Lock() - defer db.mtx.Unlock() - - db.delete(key) - return nil -} - -// delete deletes a key without locking the mutex. -func (db *MemDB) delete(key []byte) { - db.btree.Delete(newKey(key)) -} - -// DeleteSync implements DB. -func (db *MemDB) DeleteSync(key []byte) error { - return db.Delete(key) -} - -// Close implements DB. 
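Because `Get` returns a slice that aliases the value stored in the B-tree, as the MemDB doc comment above warns, callers that intend to mutate must copy first. A small sketch of the safe pattern:

```go
package main

import (
	"bytes"
	"fmt"

	dbm "cosmossdk.io/store/v2/db"
)

func main() {
	db := dbm.NewMemDB()
	if err := db.Set([]byte("k"), []byte("abc")); err != nil {
		panic(err)
	}

	v, err := db.Get([]byte("k"))
	if err != nil {
		panic(err)
	}

	// Copy before mutating: v aliases the value held by the B-tree.
	mutable := bytes.Clone(v)
	mutable[0] = 'x'

	stored, _ := db.Get([]byte("k"))
	fmt.Printf("%s %s\n", mutable, stored) // xbc abc
}
```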
-func (db *MemDB) Close() error { - // Close is a noop since for an in-memory database, we don't have a destination to flush - // contents to nor do we want any data loss on invoking Close(). - // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 - return nil -} - -// Print implements DB. -func (db *MemDB) Print() error { - db.mtx.RLock() - defer db.mtx.RUnlock() - - db.btree.Ascend(func(i btree.Item) bool { - item := i.(item) - fmt.Printf("[%X]:\t[%X]\n", item.key, item.value) - return true - }) - return nil -} - -// Stats implements DB. -func (db *MemDB) Stats() map[string]string { - db.mtx.RLock() - defer db.mtx.RUnlock() - - stats := make(map[string]string) - stats["database.type"] = "memDB" - stats["database.size"] = fmt.Sprintf("%d", db.btree.Len()) - return stats -} - -// NewBatch implements DB. -func (db *MemDB) NewBatch() corestore.Batch { - return newMemDBBatch(db) -} - -// NewBatchWithSize implements DB. -// It does the same thing as NewBatch because we can't pre-allocate memDBBatch -func (db *MemDB) NewBatchWithSize(size int) corestore.Batch { - return newMemDBBatch(db) -} - -// Iterator implements DB. -// Takes out a read-lock on the database until the iterator is closed. -func (db *MemDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - return newMemDBIterator(db, start, end, false), nil -} - -// ReverseIterator implements DB. -// Takes out a read-lock on the database until the iterator is closed. -func (db *MemDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - return newMemDBIterator(db, start, end, true), nil -} - -// IteratorNoMtx makes an iterator with no mutex. -func (db *MemDB) IteratorNoMtx(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - return newMemDBIteratorMtxChoice(db, start, end, false, false), nil -} - -// ReverseIteratorNoMtx makes an iterator with no mutex. -func (db *MemDB) ReverseIteratorNoMtx(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - return newMemDBIteratorMtxChoice(db, start, end, true, false), nil -} - -const ( - // Size of the channel buffer between traversal goroutine and iterator. Using an unbuffered - // channel causes two context switches per item sent, while buffering allows more work per - // context switch. Tuned with benchmarks. - chBufferSize = 64 -) - -// memDBIterator is a memDB iterator. -type memDBIterator struct { - ch <-chan *item - cancel context.CancelFunc - item *item - start []byte - end []byte - useMtx bool -} - -var _ corestore.Iterator = (*memDBIterator)(nil) - -// newMemDBIterator creates a new memDBIterator. 
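As the comments above note, `Iterator` takes out a read lock that is held until the iterator is closed, and ranges are `[start, end)`. A sketch that makes the close-promptly discipline explicit:

```go
package main

import (
	"fmt"

	dbm "cosmossdk.io/store/v2/db"
)

func main() {
	db := dbm.NewMemDB()
	for _, k := range []string{"a", "b", "c"} {
		if err := db.Set([]byte(k), []byte("v")); err != nil {
			panic(err)
		}
	}

	// [start, end): yields "a" and "b" but not "c".
	itr, err := db.Iterator([]byte("a"), []byte("c"))
	if err != nil {
		panic(err)
	}
	// Close releases the read lock; writers block until then.
	defer itr.Close()

	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%s ", itr.Key())
	}
}
```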
-func newMemDBIterator(db *MemDB, start, end []byte, reverse bool) *memDBIterator { - return newMemDBIteratorMtxChoice(db, start, end, reverse, true) -} - -func newMemDBIteratorMtxChoice(db *MemDB, start, end []byte, reverse, useMtx bool) *memDBIterator { - ctx, cancel := context.WithCancel(context.Background()) - ch := make(chan *item, chBufferSize) - iter := &memDBIterator{ - ch: ch, - cancel: cancel, - start: start, - end: end, - useMtx: useMtx, - } - - if useMtx { - db.mtx.RLock() - } - go func() { - if useMtx { - defer db.mtx.RUnlock() - } - // Because we use [start, end) for reverse ranges, while btree uses (start, end], we need - // the following variables to handle some reverse iteration conditions ourselves. - var ( - skipEqual []byte - abortLessThan []byte - ) - visitor := func(i btree.Item) bool { - item := i.(item) - if skipEqual != nil && bytes.Equal(item.key, skipEqual) { - skipEqual = nil - return true - } - if abortLessThan != nil && bytes.Compare(item.key, abortLessThan) == -1 { - return false - } - select { - case <-ctx.Done(): - return false - case ch <- &item: - return true - } - } - switch { - case start == nil && end == nil && !reverse: - db.btree.Ascend(visitor) - case start == nil && end == nil && reverse: - db.btree.Descend(visitor) - case end == nil && !reverse: - // must handle this specially, since nil is considered less than anything else - db.btree.AscendGreaterOrEqual(newKey(start), visitor) - case !reverse: - db.btree.AscendRange(newKey(start), newKey(end), visitor) - case end == nil: - // abort after start, since we use [start, end) while btree uses (start, end] - abortLessThan = start - db.btree.Descend(visitor) - default: - // skip end and abort after start, since we use [start, end) while btree uses (start, end] - skipEqual = end - abortLessThan = start - db.btree.DescendLessOrEqual(newKey(end), visitor) - } - close(ch) - }() - - // prime the iterator with the first value, if any - if item, ok := <-ch; ok { - iter.item = item - } - - return iter -} - -// Close implements Iterator. -func (i *memDBIterator) Close() error { - i.cancel() - for range i.ch { // drain channel - } - i.item = nil - return nil -} - -// Domain implements Iterator. -func (i *memDBIterator) Domain() ([]byte, []byte) { - return i.start, i.end -} - -// Valid implements Iterator. -func (i *memDBIterator) Valid() bool { - return i.item != nil -} - -// Next implements Iterator. -func (i *memDBIterator) Next() { - i.assertIsValid() - item, ok := <-i.ch - switch { - case ok: - i.item = item - default: - i.item = nil - } -} - -// Error implements Iterator. -func (i *memDBIterator) Error() error { - return nil // famous last words -} - -// Key implements Iterator. -func (i *memDBIterator) Key() []byte { - i.assertIsValid() - return i.item.key -} - -// Value implements Iterator. -func (i *memDBIterator) Value() []byte { - i.assertIsValid() - return i.item.value -} - -func (i *memDBIterator) assertIsValid() { - if !i.Valid() { - panic("iterator is invalid") - } -} - -// memDBBatch operations -type opType int - -const ( - opTypeSet opType = iota + 1 - opTypeDelete -) - -type operation struct { - opType - key []byte - value []byte -} - -// memDBBatch handles in-memory batching. -type memDBBatch struct { - db *MemDB - ops []operation - size int -} - -var _ corestore.Batch = (*memDBBatch)(nil) - -// newMemDBBatch creates a new memDBBatch -func newMemDBBatch(db *MemDB) *memDBBatch { - return &memDBBatch{ - db: db, - ops: []operation{}, - size: 0, - } -} - -// Set implements Batch. 
-func (b *memDBBatch) Set(key, value []byte) error {
-	if len(key) == 0 {
-		return errors.ErrKeyEmpty
-	}
-	if value == nil {
-		return errors.ErrValueNil
-	}
-	if b.ops == nil {
-		return errors.ErrBatchClosed
-	}
-	b.size += len(key) + len(value)
-	b.ops = append(b.ops, operation{opTypeSet, key, value})
-	return nil
-}
-
-// Delete implements Batch.
-func (b *memDBBatch) Delete(key []byte) error {
-	if len(key) == 0 {
-		return errors.ErrKeyEmpty
-	}
-	if b.ops == nil {
-		return errors.ErrBatchClosed
-	}
-	b.size += len(key)
-	b.ops = append(b.ops, operation{opTypeDelete, key, nil})
-	return nil
-}
-
-// Write implements Batch.
-func (b *memDBBatch) Write() error {
-	if b.ops == nil {
-		return errors.ErrBatchClosed
-	}
-	b.db.mtx.Lock()
-	defer b.db.mtx.Unlock()
-
-	for _, op := range b.ops {
-		switch op.opType {
-		case opTypeSet:
-			b.db.set(op.key, op.value)
-		case opTypeDelete:
-			b.db.delete(op.key)
-		default:
-			return fmt.Errorf("unknown operation type %v (%v)", op.opType, op)
-		}
-	}
-
-	// Make sure the batch cannot be used afterwards. Callers should still call Close(), for errors.
-	return b.Close()
-}
-
-// WriteSync implements Batch.
-func (b *memDBBatch) WriteSync() error {
-	return b.Write()
-}
-
-// Close implements Batch.
-func (b *memDBBatch) Close() error {
-	b.ops = nil
-	b.size = 0
-	return nil
-}
-
-// GetByteSize implements Batch.
-func (b *memDBBatch) GetByteSize() (int, error) {
-	if b.ops == nil {
-		return 0, errors.ErrBatchClosed
-	}
-	return b.size, nil
-}
diff --git a/store/v2/db/pebbledb.go b/store/v2/db/pebbledb.go
deleted file mode 100644
index 382ee70799..0000000000
--- a/store/v2/db/pebbledb.go
+++ /dev/null
@@ -1,303 +0,0 @@
-package db
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"path/filepath"
-	"slices"
-
-	"github.com/cockroachdb/pebble"
-	"github.com/spf13/cast"
-
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2"
-	storeerrors "cosmossdk.io/store/v2/errors"
-)
-
-var _ corestore.KVStoreWithBatch = (*PebbleDB)(nil)
-
-// PebbleDB implements `corestore.KVStoreWithBatch` using PebbleDB as the underlying storage engine.
-// It is used only for store v2 migration, since some clients use PebbleDB as
-// the IAVL v0/v1 backend.
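Before the wrapper's type and methods, a note on the upstream API they build on: pebble's Get returns the value together with an io.Closer guarding pebble's internal buffer, and every write takes an explicit sync option. A hedged sketch of just that github.com/cockroachdb/pebble usage (the v1-era API pinned by this module; the path is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cockroachdb/pebble"
)

func main() {
	// Open creates the directory if missing, as NewPebbleDBWithOpts relies on.
	db, err := pebble.Open("demo.db", &pebble.Options{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Unsynced write, matching the wrapper's WriteOptions{Sync: false}.
	if err := db.Set([]byte("key"), []byte("value"), pebble.NoSync); err != nil {
		log.Fatal(err)
	}

	// Get returns the value plus a closer guarding the underlying buffer;
	// the wrapper below maps pebble.ErrNotFound to (nil, nil).
	val, closer, err := db.Get([]byte("key"))
	if errors.Is(err, pebble.ErrNotFound) {
		fmt.Println("not found")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", val)
	if err := closer.Close(); err != nil {
		log.Fatal(err)
	}
}
```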
-type PebbleDB struct { - storage *pebble.DB -} - -func NewPebbleDB(name, dataDir string) (*PebbleDB, error) { - return NewPebbleDBWithOpts(name, dataDir, nil) -} - -func NewPebbleDBWithOpts(name, dataDir string, opts store.DBOptions) (*PebbleDB, error) { - do := &pebble.Options{ - MaxConcurrentCompactions: func() int { return 3 }, // default 1 - } - - do.EnsureDefaults() - - if opts != nil { - files := cast.ToInt(opts.Get("maxopenfiles")) - if files > 0 { - do.MaxOpenFiles = files - } - } - dbPath := filepath.Join(dataDir, name+DBFileSuffix) - db, err := pebble.Open(dbPath, do) - if err != nil { - return nil, fmt.Errorf("failed to open PebbleDB: %w", err) - } - - return &PebbleDB{storage: db}, nil -} - -func (db *PebbleDB) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *PebbleDB) Get(key []byte) ([]byte, error) { - if len(key) == 0 { - return nil, storeerrors.ErrKeyEmpty - } - - bz, closer, err := db.storage.Get(key) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in case of a fresh database - return nil, nil - } - - return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) - } - - if len(bz) == 0 { - return nil, closer.Close() - } - - return bz, closer.Close() -} - -func (db *PebbleDB) Has(key []byte) (bool, error) { - bz, err := db.Get(key) - if err != nil { - return false, err - } - - return bz != nil, nil -} - -func (db *PebbleDB) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - - return db.storage.Set(key, value, &pebble.WriteOptions{Sync: false}) -} - -func (db *PebbleDB) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - - return db.storage.Delete(key, &pebble.WriteOptions{Sync: false}) -} - -func (db *PebbleDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: start, UpperBound: end}) - if err != nil { - return nil, fmt.Errorf("failed to create PebbleDB iterator: %w", err) - } - - return newPebbleDBIterator(itr, start, end, false), nil -} - -func (db *PebbleDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: start, UpperBound: end}) - if err != nil { - return nil, fmt.Errorf("failed to create PebbleDB iterator: %w", err) - } - - return newPebbleDBIterator(itr, start, end, true), nil -} - -func (db *PebbleDB) NewBatch() corestore.Batch { - return &pebbleDBBatch{ - db: db, - batch: db.storage.NewBatch(), - } -} - -func (db *PebbleDB) NewBatchWithSize(size int) corestore.Batch { - return &pebbleDBBatch{ - db: db, - batch: db.storage.NewBatchWithSize(size), - } -} - -var _ corestore.Iterator = (*pebbleDBIterator)(nil) - -type pebbleDBIterator struct { - source *pebble.Iterator - start []byte - end []byte - valid bool - reverse bool -} - -func newPebbleDBIterator(src *pebble.Iterator, start, end []byte, reverse bool) *pebbleDBIterator { - // move the underlying PebbleDB cursor to the first key - var valid bool - if reverse { - if end == nil { - valid = src.Last() - } else { - valid = src.SeekLT(end) - } - } else { - valid = src.First() - } - - return &pebbleDBIterator{ - source: src, - start: start, - 
end: end, - valid: valid, - reverse: reverse, - } -} - -func (itr *pebbleDBIterator) Domain() (start, end []byte) { - return itr.start, itr.end -} - -func (itr *pebbleDBIterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid || !itr.source.Valid() { - itr.valid = false - return itr.valid - } - - // if source has error, consider it invalid - if err := itr.source.Error(); err != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *pebbleDBIterator) Key() []byte { - itr.assertIsValid() - return slices.Clone(itr.source.Key()) -} - -func (itr *pebbleDBIterator) Value() []byte { - itr.assertIsValid() - return slices.Clone(itr.source.Value()) -} - -func (itr *pebbleDBIterator) Next() { - itr.assertIsValid() - - if itr.reverse { - itr.valid = itr.source.Prev() - } else { - itr.valid = itr.source.Next() - } -} - -func (itr *pebbleDBIterator) Error() error { - return itr.source.Error() -} - -func (itr *pebbleDBIterator) Close() error { - err := itr.source.Close() - itr.source = nil - itr.valid = false - - return err -} - -func (itr *pebbleDBIterator) assertIsValid() { - if !itr.valid { - panic("pebbleDB iterator is invalid") - } -} - -var _ corestore.Batch = (*pebbleDBBatch)(nil) - -type pebbleDBBatch struct { - db *PebbleDB - batch *pebble.Batch -} - -func (b *pebbleDBBatch) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - - return b.batch.Set(key, value, nil) -} - -func (b *pebbleDBBatch) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if b.batch == nil { - return storeerrors.ErrBatchClosed - } - - return b.batch.Delete(key, nil) -} - -func (b *pebbleDBBatch) Write() error { - err := b.batch.Commit(&pebble.WriteOptions{Sync: false}) - if err != nil { - return fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - - return nil -} - -func (b *pebbleDBBatch) WriteSync() error { - err := b.batch.Commit(&pebble.WriteOptions{Sync: true}) - if err != nil { - return fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - - return nil -} - -func (b *pebbleDBBatch) Close() error { - return b.batch.Close() -} - -func (b *pebbleDBBatch) GetByteSize() (int, error) { - return b.batch.Len(), nil -} diff --git a/store/v2/db/prefixdb.go b/store/v2/db/prefixdb.go deleted file mode 100644 index fc13bbb5af..0000000000 --- a/store/v2/db/prefixdb.go +++ /dev/null @@ -1,367 +0,0 @@ -package db - -import ( - "bytes" - "fmt" - "sync" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/errors" -) - -// PrefixDB wraps a namespace of another database as a logical database. -type PrefixDB struct { - mtx sync.Mutex - prefix []byte - db corestore.KVStoreWithBatch -} - -var _ corestore.KVStoreWithBatch = (*PrefixDB)(nil) - -// NewPrefixDB lets you namespace multiple corestore.KVStores within a single corestore.KVStore. -func NewPrefixDB(db corestore.KVStoreWithBatch, prefix []byte) *PrefixDB { - return &PrefixDB{ - prefix: prefix, - db: db, - } -} - -// Get implements corestore.KVStore. 
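All of the methods that follow funnel keys through prefixed(), so several PrefixDB instances can share one physical store while each sees only its own namespace. A tiny sketch of the resulting key layout (names illustrative):

```go
package main

import "fmt"

// prefixed mirrors PrefixDB.prefixed: copy the prefix into a fresh slice,
// then append the key, so the stored key never aliases the caller's buffer.
func prefixed(prefix, key []byte) []byte {
	out := make([]byte, 0, len(prefix)+len(key))
	out = append(out, prefix...)
	return append(out, key...)
}

func main() {
	users := []byte("users/")
	posts := []byte("posts/")

	// Two logical namespaces over one physical keyspace:
	fmt.Printf("%s\n", prefixed(users, []byte("alice"))) // users/alice
	fmt.Printf("%s\n", prefixed(posts, []byte("alice"))) // posts/alice
}
```

Plain concatenation only isolates namespaces when no prefix is itself a prefix of another; a trailing separator byte, as above, is the usual defence.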
-func (pdb *PrefixDB) Get(key []byte) ([]byte, error) { - if len(key) == 0 { - return nil, errors.ErrKeyEmpty - } - - pkey := pdb.prefixed(key) - value, err := pdb.db.Get(pkey) - if err != nil { - return nil, err - } - - return value, nil -} - -// Has implements corestore.KVStore. -func (pdb *PrefixDB) Has(key []byte) (bool, error) { - if len(key) == 0 { - return false, errors.ErrKeyEmpty - } - - ok, err := pdb.db.Has(pdb.prefixed(key)) - if err != nil { - return ok, err - } - - return ok, nil -} - -// Set implements corestore.KVStore. -func (pdb *PrefixDB) Set(key, value []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - - return pdb.db.Set(pdb.prefixed(key), value) -} - -// Delete implements corestore.KVStore. -func (pdb *PrefixDB) Delete(key []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - - return pdb.db.Delete(pdb.prefixed(key)) -} - -// Iterator implements corestore.KVStore. -func (pdb *PrefixDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - var pstart, pend []byte - pstart = append(cp(pdb.prefix), start...) - if end == nil { - pend = cpIncr(pdb.prefix) - } else { - pend = append(cp(pdb.prefix), end...) - } - itr, err := pdb.db.Iterator(pstart, pend) - if err != nil { - return nil, err - } - - return newPrefixIterator(pdb.prefix, start, end, itr) -} - -// ReverseIterator implements corestore.KVStore. -func (pdb *PrefixDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errors.ErrKeyEmpty - } - - var pstart, pend []byte - pstart = append(cp(pdb.prefix), start...) - if end == nil { - pend = cpIncr(pdb.prefix) - } else { - pend = append(cp(pdb.prefix), end...) - } - ritr, err := pdb.db.ReverseIterator(pstart, pend) - if err != nil { - return nil, err - } - - return newPrefixIterator(pdb.prefix, start, end, ritr) -} - -// NewBatch implements corestore.BatchCreator. -func (pdb *PrefixDB) NewBatch() corestore.Batch { - return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) -} - -// NewBatchWithSize implements corestore.BatchCreator. -func (pdb *PrefixDB) NewBatchWithSize(size int) corestore.Batch { - return newPrefixBatch(pdb.prefix, pdb.db.NewBatchWithSize(size)) -} - -// Close implements corestore.KVStore. -func (pdb *PrefixDB) Close() error { - pdb.mtx.Lock() - defer pdb.mtx.Unlock() - - return pdb.db.Close() -} - -// Print implements corestore.KVStore. -func (pdb *PrefixDB) Print() error { - fmt.Printf("prefix: %X\n", pdb.prefix) - - itr, err := pdb.Iterator(nil, nil) - if err != nil { - return err - } - defer itr.Close() - for ; itr.Valid(); itr.Next() { - key := itr.Key() - value := itr.Value() - fmt.Printf("[%X]:\t[%X]\n", key, value) - } - return nil -} - -func (pdb *PrefixDB) prefixed(key []byte) []byte { - return append(cp(pdb.prefix), key...) -} - -// IteratePrefix is a convenience function for iterating over a key domain -// restricted by prefix. -func IteratePrefix(db corestore.KVStore, prefix []byte) (corestore.Iterator, error) { - var start, end []byte - if len(prefix) == 0 { - start = nil - end = nil - } else { - start = cp(prefix) - end = cpIncr(prefix) - } - itr, err := db.Iterator(start, end) - if err != nil { - return nil, err - } - return itr, nil -} - -// Strips prefix while iterating from Iterator. 
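Note how Iterator and ReverseIterator above translate a logical [start, end) range into physical bounds: start becomes prefix+start, and a nil end becomes cpIncr(prefix), the smallest byte string that sorts after every prefixed key (the helper is defined at the end of this file). A worked sketch of that bound arithmetic:

```go
package main

import "fmt"

// cpIncr treats the slice as a big-endian integer and adds one, returning nil
// on overflow; it mirrors the helper at the bottom of prefixdb.go.
func cpIncr(bz []byte) []byte {
	ret := append([]byte(nil), bz...)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] < 0xFF {
			ret[i]++
			return ret
		}
		ret[i] = 0x00
	}
	return nil // every byte was 0xFF: no finite upper bound exists
}

func main() {
	// The physical upper bound for prefix 0x61 ("a") is 0x62 ("b"):
	// every key starting with "a" sorts strictly below it.
	fmt.Printf("%X\n", cpIncr([]byte{0x61}))       // 62
	fmt.Printf("%X\n", cpIncr([]byte{0x61, 0xFF})) // 6200
	fmt.Println(cpIncr([]byte{0xFF, 0xFF}) == nil) // true: scan to the end instead
}
```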
-type prefixDBIterator struct { - prefix []byte - start []byte - end []byte - source corestore.Iterator - valid bool - err error -} - -var _ corestore.Iterator = (*prefixDBIterator)(nil) - -func newPrefixIterator(prefix, start, end []byte, source corestore.Iterator) (*prefixDBIterator, error) { - pitrInvalid := &prefixDBIterator{ - prefix: prefix, - start: start, - end: end, - source: source, - valid: false, - } - - // Empty keys are not allowed, so if a key exists in the database that exactly matches the - // prefix we need to skip it. - if source.Valid() && bytes.Equal(source.Key(), prefix) { - source.Next() - } - - if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { - return pitrInvalid, nil - } - - return &prefixDBIterator{ - prefix: prefix, - start: start, - end: end, - source: source, - valid: true, - }, nil -} - -// Domain implements Iterator. -func (itr *prefixDBIterator) Domain() (start, end []byte) { - return itr.start, itr.end -} - -// Valid implements Iterator. -func (itr *prefixDBIterator) Valid() bool { - if !itr.valid || itr.err != nil || !itr.source.Valid() { - return false - } - - key := itr.source.Key() - if len(key) < len(itr.prefix) || !bytes.Equal(key[:len(itr.prefix)], itr.prefix) { - itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)", - key, itr.prefix) - return false - } - - return true -} - -// Next implements Iterator. -func (itr *prefixDBIterator) Next() { - itr.assertIsValid() - itr.source.Next() - - if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) { - itr.valid = false - } else if bytes.Equal(itr.source.Key(), itr.prefix) { - // Empty keys are not allowed, so if a key exists in the database that exactly matches the - // prefix we need to skip it. - itr.Next() - } -} - -// Key implements Iterator. -func (itr *prefixDBIterator) Key() []byte { - itr.assertIsValid() - key := itr.source.Key() - return key[len(itr.prefix):] // we have checked the key in Valid() -} - -// Value implements Iterator. -func (itr *prefixDBIterator) Value() []byte { - itr.assertIsValid() - return itr.source.Value() -} - -// Error implements Iterator. -func (itr *prefixDBIterator) Error() error { - if err := itr.source.Error(); err != nil { - return err - } - return itr.err -} - -// Close implements Iterator. -func (itr *prefixDBIterator) Close() error { - return itr.source.Close() -} - -func (itr *prefixDBIterator) assertIsValid() { - if !itr.Valid() { - panic("iterator is invalid") - } -} - -type prefixDBBatch struct { - prefix []byte - source corestore.Batch -} - -var _ corestore.Batch = (*prefixDBBatch)(nil) - -func newPrefixBatch(prefix []byte, source corestore.Batch) prefixDBBatch { - return prefixDBBatch{ - prefix: prefix, - source: source, - } -} - -// Set implements corestore.Batch. -func (pb prefixDBBatch) Set(key, value []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - if value == nil { - return errors.ErrValueNil - } - pkey := append(cp(pb.prefix), key...) - return pb.source.Set(pkey, value) -} - -// Delete implements corestore.Batch. -func (pb prefixDBBatch) Delete(key []byte) error { - if len(key) == 0 { - return errors.ErrKeyEmpty - } - pkey := append(cp(pb.prefix), key...) - return pb.source.Delete(pkey) -} - -// Write implements corestore.Batch. -func (pb prefixDBBatch) Write() error { - return pb.source.Write() -} - -// WriteSync implements corestore.Batch. -func (pb prefixDBBatch) WriteSync() error { - return pb.source.WriteSync() -} - -// Close implements corestore.Batch. 
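End to end, the iterator above guarantees two things: a key equal to the bare prefix is skipped (it would decode to an empty key, which these stores reject), and every yielded key has the prefix stripped. A compressed in-memory illustration of that contract (the real iterator stops at the first non-prefixed key rather than scanning everything):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// iterPrefix walks sorted physical keys, visiting only those under the
// prefix, skipping the bare-prefix key, and stripping the prefix.
func iterPrefix(physical [][]byte, prefix []byte) [][]byte {
	var out [][]byte
	for _, k := range physical {
		if !bytes.HasPrefix(k, prefix) || bytes.Equal(k, prefix) {
			continue
		}
		out = append(out, k[len(prefix):])
	}
	return out
}

func main() {
	keys := [][]byte{
		[]byte("a/"),  // bare prefix: skipped (empty keys are not allowed)
		[]byte("a/1"),
		[]byte("a/2"),
		[]byte("b/1"), // different namespace: never visited
	}
	sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })

	for _, k := range iterPrefix(keys, []byte("a/")) {
		fmt.Printf("%s\n", k) // 1, then 2
	}
}
```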
-func (pb prefixDBBatch) Close() error {
-	return pb.source.Close()
-}
-
-// GetByteSize implements corestore.Batch.
-func (pb prefixDBBatch) GetByteSize() (int, error) {
-	if pb.source == nil {
-		return 0, errors.ErrBatchClosed
-	}
-	return pb.source.GetByteSize()
-}
-
-func cp(bz []byte) (ret []byte) {
-	ret = make([]byte, len(bz))
-	copy(ret, bz)
-	return ret
-}
-
-// cpIncr returns a slice of the same length (big endian), incremented by one.
-// Returns nil on overflow (e.g. if bz bytes are all 0xFF).
-// CONTRACT: len(bz) > 0
-func cpIncr(bz []byte) (ret []byte) {
-	if len(bz) == 0 {
-		panic("cpIncr expects non-zero bz length")
-	}
-	ret = cp(bz)
-	for i := len(bz) - 1; i >= 0; i-- {
-		if ret[i] < byte(0xFF) {
-			ret[i]++
-			return
-		}
-		ret[i] = byte(0x00)
-		if i == 0 {
-			// Overflow
-			return nil
-		}
-	}
-	return nil
-}
diff --git a/store/v2/db/rocksdb.go b/store/v2/db/rocksdb.go
deleted file mode 100644
index 5378de85e7..0000000000
--- a/store/v2/db/rocksdb.go
+++ /dev/null
@@ -1,348 +0,0 @@
-//go:build rocksdb
-// +build rocksdb
-
-package db
-
-import (
-	"bytes"
-	"fmt"
-	"path/filepath"
-	"runtime"
-	"slices"
-
-	"github.com/linxGnu/grocksdb"
-
-	corestore "cosmossdk.io/core/store"
-	storeerrors "cosmossdk.io/store/v2/errors"
-)
-
-var (
-	_ corestore.KVStoreWithBatch = (*RocksDB)(nil)
-
-	defaultReadOpts = grocksdb.NewDefaultReadOptions()
-)
-
-// RocksDB implements `corestore.KVStoreWithBatch` using RocksDB as the underlying storage engine.
-// It is used only for store v2 migration, since some clients use RocksDB as
-// the IAVL v0/v1 backend.
-type RocksDB struct {
-	storage *grocksdb.DB
-}
-
-// defaultRocksdbOptions returns options good enough for most cases, including heavy workloads:
-// 1GB table cache, 512MB write buffer (may use 50% more on heavy workloads),
-// snappy compression by default (requires linking with -lsnappy to enable).
-func defaultRocksdbOptions() *grocksdb.Options {
-	bbto := grocksdb.NewDefaultBlockBasedTableOptions()
-	bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30))
-	bbto.SetFilterPolicy(grocksdb.NewBloomFilter(10))
-
-	rocksdbOpts := grocksdb.NewDefaultOptions()
-	rocksdbOpts.SetBlockBasedTableFactory(bbto)
-	// SetMaxOpenFiles to 4096 seems to provide a reliable performance boost
-	rocksdbOpts.SetMaxOpenFiles(4096)
-	rocksdbOpts.SetCreateIfMissing(true)
-	rocksdbOpts.IncreaseParallelism(runtime.NumCPU())
-	// 1.5GB maximum memory use for writebuffer.
- rocksdbOpts.OptimizeLevelStyleCompaction(512 * 1024 * 1024) - return rocksdbOpts -} - -func NewRocksDB(name, dataDir string) (*RocksDB, error) { - opts := defaultRocksdbOptions() - opts.SetCreateIfMissing(true) - - return NewRocksDBWithOpts(name, dataDir, opts) -} - -func NewRocksDBWithOpts(name, dataDir string, opts *grocksdb.Options) (*RocksDB, error) { - dbPath := filepath.Join(dataDir, name+DBFileSuffix) - storage, err := grocksdb.OpenDb(opts, dbPath) - if err != nil { - return nil, fmt.Errorf("failed to open RocksDB: %w", err) - } - - return &RocksDB{ - storage: storage, - }, nil -} - -func (db *RocksDB) Close() error { - db.storage.Close() - db.storage = nil - return nil -} - -func (db *RocksDB) Get(key []byte) ([]byte, error) { - bz, err := db.storage.GetBytes(defaultReadOpts, key) - if err != nil { - return nil, err - } - - return bz, nil -} - -func (db *RocksDB) Has(key []byte) (bool, error) { - bz, err := db.Get(key) - if err != nil { - return false, err - } - - return bz != nil, nil -} - -func (db *RocksDB) Set(key, value []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - if value == nil { - return storeerrors.ErrValueNil - } - - return db.storage.Put(grocksdb.NewDefaultWriteOptions(), key, value) -} - -func (db *RocksDB) Delete(key []byte) error { - if len(key) == 0 { - return storeerrors.ErrKeyEmpty - } - - return db.storage.Delete(grocksdb.NewDefaultWriteOptions(), key) -} - -func (db *RocksDB) Iterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr := db.storage.NewIterator(defaultReadOpts) - return newRocksDBIterator(itr, start, end, false), nil -} - -func (db *RocksDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - itr := db.storage.NewIterator(defaultReadOpts) - return newRocksDBIterator(itr, start, end, true), nil -} - -func (db *RocksDB) NewBatch() corestore.Batch { - return &rocksDBBatch{ - db: db, - batch: grocksdb.NewWriteBatch(), - } -} - -func (db *RocksDB) NewBatchWithSize(_ int) corestore.Batch { - return db.NewBatch() -} - -var _ corestore.Iterator = (*rocksDBIterator)(nil) - -type rocksDBIterator struct { - source *grocksdb.Iterator - start []byte - end []byte - valid bool - reverse bool -} - -func newRocksDBIterator(src *grocksdb.Iterator, start, end []byte, reverse bool) *rocksDBIterator { - if reverse { - if end == nil { - src.SeekToLast() - } else { - src.Seek(end) - - if src.Valid() { - eoaKey := readOnlySlice(src.Key()) // end or after key - if bytes.Compare(end, eoaKey) <= 0 { - src.Prev() - } - } else { - src.SeekToLast() - } - } - } else { - if start == nil { - src.SeekToFirst() - } else { - src.Seek(start) - } - } - - return &rocksDBIterator{ - source: src, - start: start, - end: end, - reverse: reverse, - valid: src.Valid(), - } -} - -func (itr *rocksDBIterator) Domain() (start, end []byte) { - return itr.start, itr.end -} - -func (itr *rocksDBIterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid { - return false - } - - // if source has error, consider it invalid - if err := itr.source.Err(); err != nil { - itr.valid = false - return false - } - - // if source is invalid, consider it invalid - if !itr.source.Valid() { - itr.valid = false - return false - } - - // if key is at the end or past it, consider it invalid - start := itr.start - end 
:= itr.end
-	key := readOnlySlice(itr.source.Key())
-
-	if itr.reverse {
-		if start != nil && bytes.Compare(key, start) < 0 {
-			itr.valid = false
-			return false
-		}
-	} else {
-		if end != nil && bytes.Compare(end, key) <= 0 {
-			itr.valid = false
-			return false
-		}
-	}
-
-	return true
-}
-
-func (itr *rocksDBIterator) Key() []byte {
-	itr.assertIsValid()
-	return copyAndFreeSlice(itr.source.Key())
-}
-
-func (itr *rocksDBIterator) Value() []byte {
-	itr.assertIsValid()
-	return copyAndFreeSlice(itr.source.Value())
-}
-
-func (itr *rocksDBIterator) Next() {
-	if !itr.valid {
-		return
-	}
-
-	if itr.reverse {
-		itr.source.Prev()
-	} else {
-		itr.source.Next()
-	}
-}
-
-func (itr *rocksDBIterator) Error() error {
-	return itr.source.Err()
-}
-
-func (itr *rocksDBIterator) Close() error {
-	itr.source.Close()
-	itr.source = nil
-	itr.valid = false
-
-	return nil
-}
-
-func (itr *rocksDBIterator) assertIsValid() {
-	if !itr.valid {
-		panic("rocksDB iterator is invalid")
-	}
-}
-
-type rocksDBBatch struct {
-	db    *RocksDB
-	batch *grocksdb.WriteBatch
-}
-
-func (b *rocksDBBatch) Set(key, value []byte) error {
-	if len(key) == 0 {
-		return storeerrors.ErrKeyEmpty
-	}
-	if value == nil {
-		return storeerrors.ErrValueNil
-	}
-	if b.batch == nil {
-		return storeerrors.ErrBatchClosed
-	}
-
-	b.batch.Put(key, value)
-	return nil
-}
-
-func (b *rocksDBBatch) Delete(key []byte) error {
-	if len(key) == 0 {
-		return storeerrors.ErrKeyEmpty
-	}
-	if b.batch == nil {
-		return storeerrors.ErrBatchClosed
-	}
-
-	b.batch.Delete(key)
-	return nil
-}
-
-func (b *rocksDBBatch) Write() error {
-	writeOpts := grocksdb.NewDefaultWriteOptions()
-	writeOpts.SetSync(false)
-
-	if err := b.db.storage.Write(writeOpts, b.batch); err != nil {
-		return fmt.Errorf("failed to write RocksDB batch: %w", err)
-	}
-
-	return nil
-}
-
-func (b *rocksDBBatch) WriteSync() error {
-	writeOpts := grocksdb.NewDefaultWriteOptions()
-	writeOpts.SetSync(true)
-
-	if err := b.db.storage.Write(writeOpts, b.batch); err != nil {
-		return fmt.Errorf("failed to write RocksDB batch: %w", err)
-	}
-
-	return nil
-}
-
-func (b *rocksDBBatch) Close() error {
-	b.batch.Destroy()
-	return nil
-}
-
-func (b *rocksDBBatch) GetByteSize() (int, error) {
-	return len(b.batch.Data()), nil
-}
-
-func readOnlySlice(s *grocksdb.Slice) []byte {
-	if !s.Exists() {
-		return nil
-	}
-
-	return s.Data()
-}
-
-// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice
-// does not exist, nil will be returned.
-func copyAndFreeSlice(s *grocksdb.Slice) []byte {
-	defer s.Free()
-
-	if !s.Exists() {
-		return nil
-	}
-
-	return slices.Clone(s.Data())
-}
diff --git a/store/v2/db/rocksdb_noflag.go b/store/v2/db/rocksdb_noflag.go
deleted file mode 100644
index 6e05c9a10a..0000000000
--- a/store/v2/db/rocksdb_noflag.go
+++ /dev/null
@@ -1,60 +0,0 @@
-//go:build !rocksdb
-// +build !rocksdb
-
-package db
-
-import (
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2"
-)
-
-var _ corestore.KVStoreWithBatch = (*RocksDB)(nil)
-
-// RocksDB implements `corestore.KVStoreWithBatch` using RocksDB as the underlying storage engine.
-// It is used only for store v2 migration, since some clients use RocksDB as
-// the IAVL v0/v1 backend.
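The stub that follows is the second half of a standard build-tag pairing: rocksdb.go above is guarded by //go:build rocksdb, this file by //go:build !rocksdb, so the package always compiles and RocksDB support becomes opt-in via go build -tags rocksdb. A minimal sketch of the pattern with hypothetical file, package, and function names:

```go
// enabled_feature.go, compiled only with: go build -tags somefeature
//go:build somefeature

package feature

// Enabled reports that the optional backend was compiled in.
func Enabled() bool { return true }

// disabled_feature.go, the stub linked by a plain `go build`
//go:build !somefeature
//
// package feature
//
// Enabled reports that the optional backend was not compiled in; callers can
// fail fast, as the rocksdb stubs do by panicking with a build hint.
// func Enabled() bool { return false }
```

One file per constraint: the toolchain links exactly one of the pair, so the exported surface stays identical either way.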
-type RocksDB struct{} - -func NewRocksDB(name, dataDir string) (*RocksDB, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func NewRocksDBWithOpts(dataDir string, opts store.DBOptions) (*RocksDB, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Close() error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Get(key []byte) ([]byte, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Has(key []byte) (bool, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Set(key, value []byte) error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Delete(key []byte) error { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) Iterator(start, end []byte) (corestore.Iterator, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) NewBatch() corestore.Batch { - panic("rocksdb must be built with -tags rocksdb") -} - -func (db *RocksDB) NewBatchWithSize(_ int) corestore.Batch { - panic("rocksdb must be built with -tags rocksdb") -} diff --git a/store/v2/db/rocksdb_test.go b/store/v2/db/rocksdb_test.go deleted file mode 100644 index 75147a5685..0000000000 --- a/store/v2/db/rocksdb_test.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package db - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -func TestRocksDBSuite(t *testing.T) { - db, err := NewRocksDB("test", t.TempDir()) - require.NoError(t, err) - - suite.Run(t, &DBTestSuite{ - db: db, - }) -} diff --git a/store/v2/db/wrapper.go b/store/v2/db/wrapper.go deleted file mode 100644 index 72d4ddca30..0000000000 --- a/store/v2/db/wrapper.go +++ /dev/null @@ -1,39 +0,0 @@ -package db - -import ( - idb "github.com/cosmos/iavl/db" - - corestore "cosmossdk.io/core/store" -) - -// Wrapper wraps a `corestore.KVStoreWithBatch` to implement iavl.DB which is used by iavl.Tree. -type Wrapper struct { - corestore.KVStoreWithBatch -} - -var _ idb.DB = (*Wrapper)(nil) - -// NewWrapper returns a new Wrapper. -func NewWrapper(db corestore.KVStoreWithBatch) *Wrapper { - return &Wrapper{KVStoreWithBatch: db} -} - -// Iterator implements iavl.DB. -func (db *Wrapper) Iterator(start, end []byte) (idb.Iterator, error) { - return db.KVStoreWithBatch.Iterator(start, end) -} - -// ReverseIterator implements iavl.DB. -func (db *Wrapper) ReverseIterator(start, end []byte) (idb.Iterator, error) { - return db.KVStoreWithBatch.ReverseIterator(start, end) -} - -// NewBatch implements iavl.DB. -func (db *Wrapper) NewBatch() idb.Batch { - return db.KVStoreWithBatch.NewBatch() -} - -// NewBatchWithSize implements iavl.DB. 
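Wrapper is a pure adapter: it embeds corestore.KVStoreWithBatch and re-declares only the methods whose declared return types differ (idb.Iterator versus corestore.Iterator), relying on the returned values satisfying both interfaces. A self-contained sketch of the same embedding trick with illustrative local interfaces:

```go
package main

import "fmt"

// Two structurally identical iterator interfaces owned by different packages.
type IterA interface{ Next() bool }
type IterB interface{ Next() bool }

type StoreA interface{ Iterate() (IterA, error) }
type StoreB interface{ Iterate() (IterB, error) }

// wrapper adapts a StoreA into a StoreB by re-declaring only the method whose
// signature differs; any value satisfying IterA also satisfies IterB.
type wrapper struct{ StoreA }

func (w wrapper) Iterate() (IterB, error) {
	return w.StoreA.Iterate()
}

// A trivial StoreA implementation for the demo.
type sliceIter struct{ n int }

func (s *sliceIter) Next() bool { s.n--; return s.n >= 0 }

type sliceStore struct{}

func (sliceStore) Iterate() (IterA, error) { return &sliceIter{n: 3}, nil }

func main() {
	var b StoreB = wrapper{sliceStore{}}
	it, _ := b.Iterate()
	for it.Next() {
		fmt.Println("step")
	}
}
```

The explicit method shadows the one promoted from the embedded interface, which is exactly how Wrapper's Iterator, ReverseIterator, and NewBatch take precedence over the embedded KVStoreWithBatch's.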
-func (db *Wrapper) NewBatchWithSize(size int) idb.Batch { - return db.KVStoreWithBatch.NewBatchWithSize(size) -} diff --git a/store/v2/errors/errors.go b/store/v2/errors/errors.go deleted file mode 100644 index 6ca2be95ce..0000000000 --- a/store/v2/errors/errors.go +++ /dev/null @@ -1,52 +0,0 @@ -package errors - -import ( - "errors" - "fmt" -) - -var ( - // ErrInvalidProof is returned when a proof is invalid - ErrInvalidProof = errors.New("invalid proof") - - // ErrTxDecode is returned if we cannot parse a transaction - ErrTxDecode = errors.New("tx parse error") - - // ErrUnknownRequest to doc - ErrUnknownRequest = errors.New("unknown request") - - // ErrLogic defines an internal logic error, e.g. an invariant or assertion - // that is violated. It is a programmer error, not a user-facing error. - ErrLogic = errors.New("internal logic error") - - // ErrConflict defines a conflict error, e.g. when two goroutines try to access - // the same resource and one of them fails. - ErrConflict = errors.New("conflict") - - // ErrInvalidRequest defines an ABCI typed error where the request contains - // invalid data. - ErrInvalidRequest = errors.New("invalid request") - - ErrClosed = errors.New("closed") - ErrRecordNotFound = errors.New("record not found") - ErrUnknownStoreKey = errors.New("unknown store key") - ErrKeyEmpty = errors.New("key empty") - ErrStartAfterEnd = errors.New("start key after end key") - - // ErrBatchClosed is returned when a closed or written batch is used. - ErrBatchClosed = errors.New("batch has been written or closed") - - // ErrValueNil is returned when attempting to set a nil value. - ErrValueNil = errors.New("value nil") -) - -// ErrVersionPruned defines an error returned when a version queried is pruned -// or does not exist. -type ErrVersionPruned struct { - RequestedVersion uint64 - EarliestVersion uint64 -} - -func (e ErrVersionPruned) Error() string { - return fmt.Sprintf("requested version %d is pruned; earliest available version is: %d", e.RequestedVersion, e.EarliestVersion) -} diff --git a/store/v2/go.mod b/store/v2/go.mod deleted file mode 100644 index ea0af2b941..0000000000 --- a/store/v2/go.mod +++ /dev/null @@ -1,70 +0,0 @@ -module cosmossdk.io/store/v2 - -go 1.21 - -require ( - cosmossdk.io/core v0.12.0 - cosmossdk.io/core/testing v0.0.0-00010101000000-000000000000 - cosmossdk.io/errors v1.0.1 - cosmossdk.io/log v1.3.1 - github.com/cockroachdb/pebble v1.1.0 - github.com/cosmos/gogoproto v1.5.0 - github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179 - github.com/cosmos/ics23/go v0.10.0 - github.com/google/btree v1.1.2 - github.com/hashicorp/go-metrics v0.5.3 - github.com/linxGnu/grocksdb v1.8.14 - github.com/mattn/go-sqlite3 v1.14.22 - github.com/spf13/cast v1.6.0 - github.com/stretchr/testify v1.9.0 - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d - golang.org/x/sync v0.7.0 -) - -require ( - github.com/DataDog/zstd v1.5.5 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.11.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/redact v1.1.5 // indirect - github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/cosmos/cosmos-db v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/dot v1.6.1 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - 
github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/hashicorp/go-immutable-radix v1.0.0 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/klauspost/compress v1.17.7 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/zerolog v1.33.0 // indirect - github.com/tidwall/btree v1.7.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect - google.golang.org/grpc v1.64.1 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace cosmossdk.io/core => ../../core - -replace cosmossdk.io/core/testing => ../../core/testing diff --git a/store/v2/go.sum b/store/v2/go.sum deleted file mode 100644 index 1f63dd3cea..0000000000 --- a/store/v2/go.sum +++ /dev/null @@ -1,325 +0,0 @@ -cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= -cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= -cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= -cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= -github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= -github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.0 h1:pcFh8CdCIt2kmEpK0OIatq67Ln9uGDYY3d5XnE0LJG4= -github.com/cockroachdb/pebble v1.1.0/go.mod h1:sEHm5NOXxyiAoKWhoFxT8xMgd/f3RA6qUqQ1BXKrh2E= -github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= -github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= -github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAKs= -github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= -github.com/cosmos/gogoproto v1.5.0 h1:SDVwzEqZDDBoslaeZg+dGE55hdzHfgUA40pEanMh52o= -github.com/cosmos/gogoproto v1.5.0/go.mod h1:iUM31aofn3ymidYG6bUR5ZFrk+Om8p5s754eMUcyp8I= -github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179 h1:wmwDn7V3RodN9auB3FooSQxs46nHVE3u0mb87TJkZFE= -github.com/cosmos/iavl v1.2.1-0.20240725141113-7adc688cf179/go.mod h1:GiM43q0pB+uG53mLxLDzimxM9l/5N9UuSY3/D0huuVw= -github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= -github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI= -github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps= -github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE= -github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ= -github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA= 
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q= -github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= -github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw= -golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= diff --git a/store/v2/internal/conv/doc.go b/store/v2/internal/conv/doc.go deleted file mode 100644 index 4b45d1ab53..0000000000 --- a/store/v2/internal/conv/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package conv provides internal functions for conversions and data manipulation -package conv diff --git a/store/v2/internal/conv/string.go b/store/v2/internal/conv/string.go deleted file mode 100644 index 96d89c3a5f..0000000000 --- a/store/v2/internal/conv/string.go +++ /dev/null @@ -1,19 +0,0 @@ -package conv - -import ( - "unsafe" -) - -// UnsafeStrToBytes uses unsafe to convert string into byte array. Returned bytes -// must not be altered after this function is called as it will cause a segmentation fault. -func UnsafeStrToBytes(s string) []byte { - return unsafe.Slice(unsafe.StringData(s), len(s)) // ref https://github.com/golang/go/issues/53003#issuecomment-1140276077 -} - -// UnsafeBytesToStr is meant to make a zero allocation conversion -// from []byte -> string to speed up operations, it is not meant -// to be used generally, but for a specific pattern to delete keys -// from a map. -func UnsafeBytesToStr(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} diff --git a/store/v2/internal/conv/string_test.go b/store/v2/internal/conv/string_test.go deleted file mode 100644 index 3a14517531..0000000000 --- a/store/v2/internal/conv/string_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package conv - -import ( - "runtime" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/suite" -) - -func TestStringSuite(t *testing.T) { - suite.Run(t, new(StringSuite)) -} - -type StringSuite struct{ suite.Suite } - -func unsafeConvertStr() []byte { - return UnsafeStrToBytes("abc") -} - -func (s *StringSuite) TestUnsafeStrToBytes() { - // we convert in other function to trigger GC. We want to check that - // the underlying array in []bytes is accessible after GC will finish swapping. - for i := 0; i < 5; i++ { - b := unsafeConvertStr() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - b2 := append(b, 'd') - s.Equal("abc", string(b)) - s.Equal("abcd", string(b2)) - } -} - -func unsafeConvertBytes() string { - return UnsafeBytesToStr([]byte("abc")) -} - -func (s *StringSuite) TestUnsafeBytesToStr() { - // we convert in other function to trigger GC. 
We want to check that - // the underlying array in []bytes is accessible after GC will finish swapping. - for i := 0; i < 5; i++ { - str := unsafeConvertBytes() - runtime.GC() - <-time.NewTimer(2 * time.Millisecond).C - s.Equal("abc", str) - } -} - -func BenchmarkUnsafeStrToBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - UnsafeStrToBytes(strconv.Itoa(i)) - } -} diff --git a/store/v2/internal/encoding/changeset.go b/store/v2/internal/encoding/changeset.go deleted file mode 100644 index 8aefed75c3..0000000000 --- a/store/v2/internal/encoding/changeset.go +++ /dev/null @@ -1,125 +0,0 @@ -package encoding - -import ( - "bytes" - "fmt" - - corestore "cosmossdk.io/core/store" -) - -// encodedSize returns the size of the encoded Changeset. -func encodedSize(cs *corestore.Changeset) int { - size := EncodeUvarintSize(uint64(len(cs.Changes))) - for _, changes := range cs.Changes { - size += EncodeBytesSize(changes.Actor) - size += EncodeUvarintSize(uint64(len(changes.StateChanges))) - for _, pair := range changes.StateChanges { - size += EncodeBytesSize(pair.Key) - size += EncodeUvarintSize(1) // pair.Remove - if !pair.Remove { - size += EncodeBytesSize(pair.Value) - } - } - } - return size -} - -// MarshalChangeset returns the encoded byte representation of Changeset. -// NOTE: The Changeset is encoded as follows: -// - number of store keys (uvarint) -// - for each store key: -// -- store key (bytes) -// -- number of pairs (uvarint) -// -- for each pair: -// --- key (bytes) -// --- remove (1 byte) -// --- value (bytes) -func MarshalChangeset(cs *corestore.Changeset) ([]byte, error) { - var buf bytes.Buffer - buf.Grow(encodedSize(cs)) - - if err := EncodeUvarint(&buf, uint64(len(cs.Changes))); err != nil { - return nil, err - } - for _, changes := range cs.Changes { - if err := EncodeBytes(&buf, changes.Actor); err != nil { - return nil, err - } - if err := EncodeUvarint(&buf, uint64(len(changes.StateChanges))); err != nil { - return nil, err - } - for _, pair := range changes.StateChanges { - if err := EncodeBytes(&buf, pair.Key); err != nil { - return nil, err - } - if pair.Remove { - if err := EncodeUvarint(&buf, 1); err != nil { - return nil, err - } - } else { - if err := EncodeUvarint(&buf, 0); err != nil { - return nil, err - } - if err := EncodeBytes(&buf, pair.Value); err != nil { - return nil, err - } - } - } - } - - return buf.Bytes(), nil -} - -// UnmarshalChangeset decodes the Changeset from the given byte slice. 
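The wire format documented in MarshalChangeset's NOTE is easy to reproduce by hand. Below is a minimal, stdlib-only sketch (independent of the corestore types) that encodes the same "one store" case as the golden bytes in changeset_test.go further down:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeBytes writes a uvarint length prefix followed by the raw bytes,
// mirroring the EncodeBytes helper used by MarshalChangeset.
func writeBytes(buf *bytes.Buffer, bz []byte) {
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(len(bz)))
	buf.Write(tmp[:n])
	buf.Write(bz)
}

func main() {
	var buf bytes.Buffer
	var tmp [binary.MaxVarintLen64]byte

	// one store ("storekey") with one pair: key -> value, not removed
	n := binary.PutUvarint(tmp[:], 1) // number of store keys
	buf.Write(tmp[:n])
	writeBytes(&buf, []byte("storekey"))
	n = binary.PutUvarint(tmp[:], 1) // number of pairs in this store
	buf.Write(tmp[:n])
	writeBytes(&buf, []byte("key"))
	buf.WriteByte(0) // remove flag: uvarint 0 (a single zero byte) means "set"
	writeBytes(&buf, []byte("value"))

	fmt.Printf("%x\n", buf.Bytes())
	// 010873746f72656b657901036b6579000576616c7565
	// matches the "one store" golden bytes in changeset_test.go
}
```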
-func UnmarshalChangeset(cs *corestore.Changeset, buf []byte) error { - storeCount, n, err := DecodeUvarint(buf) - if err != nil { - return err - } - buf = buf[n:] - changes := make([]corestore.StateChanges, storeCount) - for i := uint64(0); i < storeCount; i++ { - storeKey, n, err := DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - - pairCount, n, err := DecodeUvarint(buf) - if err != nil { - return err - } - buf = buf[n:] - - pairs := make([]corestore.KVPair, pairCount) - for j := uint64(0); j < pairCount; j++ { - pairs[j].Key, n, err = DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - - remove, n, err := DecodeUvarint(buf) - if err != nil { - return err - } - buf = buf[n:] - if remove == 0 { - pairs[j].Remove = false - pairs[j].Value, n, err = DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - } else if remove == 1 { - pairs[j].Remove = true - } else { - return fmt.Errorf("invalid remove flag: %d", remove) - } - } - changes[i] = corestore.StateChanges{Actor: storeKey, StateChanges: pairs} - } - cs.Changes = changes - - return nil -} diff --git a/store/v2/internal/encoding/changeset_test.go b/store/v2/internal/encoding/changeset_test.go deleted file mode 100644 index 03313936b9..0000000000 --- a/store/v2/internal/encoding/changeset_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package encoding - -import ( - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" -) - -func TestChangesetMarshal(t *testing.T) { - testcases := []struct { - name string - changeset *corestore.Changeset - encodedSize int - encodedBytes []byte - }{ - { - name: "empty", - changeset: corestore.NewChangeset(), - encodedSize: 1, - encodedBytes: []byte{0x0}, - }, - { - name: "one store", - changeset: &corestore.Changeset{Changes: []corestore.StateChanges{ - { - Actor: []byte("storekey"), - StateChanges: corestore.KVPairs{ - {Key: []byte("key"), Value: []byte("value"), Remove: false}, - }, - }, - }}, - encodedSize: 1 + 1 + 8 + 1 + 1 + 3 + 1 + 1 + 5, - encodedBytes: []byte{0x1, 0x8, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x1, 0x3, 0x6b, 0x65, 0x79, 0x0, 0x5, 0x76, 0x61, 0x6c, 0x75, 0x65}, - }, - { - name: "one remove store", - changeset: &corestore.Changeset{Changes: []corestore.StateChanges{ - { - Actor: []byte("storekey"), - StateChanges: corestore.KVPairs{ - {Key: []byte("key"), Remove: true}, - }, - }, - }}, - encodedSize: 1 + 1 + 8 + 1 + 1 + 3 + 1, - encodedBytes: []byte{0x1, 0x8, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x6b, 0x65, 0x79, 0x1, 0x3, 0x6b, 0x65, 0x79, 0x1}, - }, - { - name: "two stores", - changeset: &corestore.Changeset{Changes: []corestore.StateChanges{ - { - Actor: []byte("storekey1"), - StateChanges: corestore.KVPairs{ - {Key: []byte("key1"), Value: []byte("value1"), Remove: false}, - }, - }, - { - Actor: []byte("storekey2"), - StateChanges: corestore.KVPairs{ - {Key: []byte("key2"), Value: []byte("value2"), Remove: false}, - {Key: []byte("key1"), Remove: true}, - }, - }, - }}, - encodedSize: 2 + 1 + 9 + 1 + 1 + 4 + 1 + 6 + 1 + 9 + 1 + 1 + 4 + 1 + 1 + 6 + 1 + 4 + 1, - // encodedBytes: it is not deterministic, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - // check the encoded size - require.Equal(t, encodedSize(tc.changeset), tc.encodedSize, "encoded size mismatch") - // check the encoded bytes - encodedBytes, err := MarshalChangeset(tc.changeset) - require.NoError(t, err, "marshal error") - if len(tc.encodedBytes) != 0 { - require.Equal(t, encodedBytes, tc.encodedBytes, 
"encoded bytes mismatch") - } - // check the unmarshaled changeset - cs := corestore.NewChangeset() - require.NoError(t, UnmarshalChangeset(cs, encodedBytes), "unmarshal error") - require.Equal(t, len(tc.changeset.Changes), len(cs.Changes), "unmarshaled changeset store size mismatch") - for i, changes := range tc.changeset.Changes { - require.Equal(t, changes.Actor, cs.Changes[i].Actor, "unmarshaled changeset store key mismatch") - require.Equal(t, len(changes.StateChanges), len(cs.Changes[i].StateChanges), "unmarshaled changeset StateChanges size mismatch") - for j, pair := range changes.StateChanges { - require.Equal(t, pair, cs.Changes[i].StateChanges[j], "unmarshaled changeset pair mismatch") - } - } - }) - } -} diff --git a/store/v2/internal/encoding/encoding.go b/store/v2/internal/encoding/encoding.go deleted file mode 100644 index b73b923f11..0000000000 --- a/store/v2/internal/encoding/encoding.go +++ /dev/null @@ -1,166 +0,0 @@ -package encoding - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" - "sync" -) - -var bufPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -var varintPool = &sync.Pool{ - New: func() interface{} { - return &[binary.MaxVarintLen64]byte{} - }, -} - -var uvarintPool = &sync.Pool{ - New: func() interface{} { - return &[binary.MaxVarintLen64]byte{} - }, -} - -// DecodeBytes decodes a varint length-prefixed byte slice, returning it along with the number -// of input bytes read. -// Assumes bz will not be mutated. -func DecodeBytes(bz []byte) ([]byte, int, error) { - s, n, err := DecodeUvarint(bz) - if err != nil { - return nil, n, err - } - // Make sure size doesn't overflow. ^uint(0) >> 1 will help determine the - // max int value variably on 32-bit and 64-bit machines. We also doublecheck - // that size is positive. - size := int(s) - if s >= uint64(^uint(0)>>1) || size < 0 { - return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", s) - } - // Make sure end index doesn't overflow. We know n>0 from decodeUvarint(). - end := n + size - if end < n { - return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", size) - } - // Make sure the end index is within bounds. - if len(bz) < end { - return nil, n, fmt.Errorf("insufficient bytes decoding []byte of length %v", size) - } - return bz[n:end], end, nil -} - -// DecodeUvarint decodes a varint-encoded unsigned integer from a byte slice, returning it and the -// number of bytes decoded. -func DecodeUvarint(bz []byte) (uint64, int, error) { - u, n := binary.Uvarint(bz) - if n == 0 { - // buf too small - return u, n, errors.New("buffer too small") - } else if n < 0 { - // value larger than 64 bits (overflow) - // and -n is the number of bytes read - n = -n - return u, n, errors.New("EOF decoding uvarint") - } - return u, n, nil -} - -// DecodeVarint decodes a varint-encoded integer from a byte slice, returning it and the number of -// bytes decoded. -func DecodeVarint(bz []byte) (int64, int, error) { - i, n := binary.Varint(bz) - if n == 0 { - return i, n, errors.New("buffer too small") - } else if n < 0 { - // value larger than 64 bits (overflow) - // and -n is the number of bytes read - n = -n - return i, n, errors.New("EOF decoding varint") - } - return i, n, nil -} - -// EncodeBytes writes a varint length-prefixed byte slice to the writer. 
-func EncodeBytes(w io.Writer, bz []byte) error { - err := EncodeUvarint(w, uint64(len(bz))) - if err != nil { - return err - } - _, err = w.Write(bz) - return err -} - -// EncodeBytesSlice length-prefixes the byte slice and returns it. -func EncodeBytesSlice(bz []byte) ([]byte, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - - err := EncodeBytes(buf, bz) - - bytesCopy := make([]byte, buf.Len()) - copy(bytesCopy, buf.Bytes()) - - return bytesCopy, err -} - -// EncodeBytesSize returns the byte size of the given slice including length-prefixing. -func EncodeBytesSize(bz []byte) int { - return EncodeUvarintSize(uint64(len(bz))) + len(bz) -} - -// EncodeUvarint writes a varint-encoded unsigned integer to an io.Writer. -func EncodeUvarint(w io.Writer, u uint64) error { - // See comment in encodeVarint - buf := uvarintPool.Get().(*[binary.MaxVarintLen64]byte) - - n := binary.PutUvarint(buf[:], u) - _, err := w.Write(buf[0:n]) - - uvarintPool.Put(buf) - - return err -} - -// EncodeUvarintSize returns the byte size of the given integer as a varint. -func EncodeUvarintSize(u uint64) int { - if u == 0 { - return 1 - } - return (bits.Len64(u) + 6) / 7 -} - -// EncodeVarint writes a varint-encoded integer to an io.Writer. -func EncodeVarint(w io.Writer, i int64) error { - // Use a pool here to reduce allocations. - // - // Though this allocates just 10 bytes on the stack, doing an allocation for every call - // costs significant memory. Profiling shows that using a pool saves us ~30% memory. - // - // When we don't have concurrent access to the pool, the speed will be nearly identical. - // If we need to support concurrent access, we can accept a *[binary.MaxVarintLen64]byte as - // input, so the caller can allocate just one and pass the same array pointer to each call. - buf := varintPool.Get().(*[binary.MaxVarintLen64]byte) - - n := binary.PutVarint(buf[:], i) - _, err := w.Write(buf[0:n]) - - varintPool.Put(buf) - - return err -} - -// EncodeVarintSize returns the byte size of the given integer as a varint. -func EncodeVarintSize(i int64) int { - ux := uint64(i) << 1 - if i < 0 { - ux = ^ux - } - return EncodeUvarintSize(ux) -} diff --git a/store/v2/internal/util.go b/store/v2/internal/util.go deleted file mode 100644 index c09c6fd025..0000000000 --- a/store/v2/internal/util.go +++ /dev/null @@ -1,7 +0,0 @@ -package internal - -import "strings" - -func IsMemoryStoreKey(key string) bool { - return strings.HasPrefix(key, "memory:") -} diff --git a/store/v2/metrics/metrics.go b/store/v2/metrics/metrics.go deleted file mode 100644 index bea5eda8a4..0000000000 --- a/store/v2/metrics/metrics.go +++ /dev/null @@ -1,47 +0,0 @@ -package metrics - -import ( - "fmt" - "time" - - "github.com/hashicorp/go-metrics" -) - -var _ StoreMetrics = Metrics{} - -// StoreMetrics defines the set of supported metric APIs for the store package. -type StoreMetrics interface { - MeasureSince(start time.Time, keys ...string) -} - -// Metrics defines a default StoreMetrics implementation. -type Metrics struct { - Labels []metrics.Label -} - -// NewMetrics returns a new instance of the Metrics with labels set by the node -// operator.
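EncodeUvarintSize computes the varint length in closed form: a uvarint carries 7 payload bits per byte, so the size is ceil(bitlen/7), with zero still occupying one byte. A quick stdlib-only check of the formula against binary.PutUvarint:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// uvarintSize is the closed-form size used by EncodeUvarintSize above.
func uvarintSize(u uint64) int {
	if u == 0 {
		return 1 // zero has bit length 0 but still takes one byte
	}
	return (bits.Len64(u) + 6) / 7 // ceil(bitlen / 7)
}

func main() {
	var buf [binary.MaxVarintLen64]byte
	for _, u := range []uint64{0, 1, 127, 128, 16383, 16384, 1 << 62} {
		n := binary.PutUvarint(buf[:], u)
		fmt.Printf("u=%d closed-form=%d actual=%d\n", u, uvarintSize(u), n)
	}
}
```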
-func NewMetrics(labels [][]string) (Metrics, error) { - m := Metrics{} - - if numGlobalLabels := len(labels); numGlobalLabels > 0 { - parsedGlobalLabels := make([]metrics.Label, numGlobalLabels) - for i, label := range labels { - if len(label) != 2 { - return Metrics{}, fmt.Errorf("invalid global label length; expected 2, got %d", len(label)) - } - - parsedGlobalLabels[i] = metrics.Label{Name: label[0], Value: label[1]} - } - - m.Labels = parsedGlobalLabels - } - - return m, nil -} - -// MeasureSince provides a wrapper functionality for emitting a time measure -// metric with global labels (if any). -func (m Metrics) MeasureSince(start time.Time, keys ...string) { - metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels) -} diff --git a/store/v2/migration/manager.go b/store/v2/migration/manager.go deleted file mode 100644 index bd636dc3c6..0000000000 --- a/store/v2/migration/manager.go +++ /dev/null @@ -1,276 +0,0 @@ -package migration - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - "time" - - "golang.org/x/sync/errgroup" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/internal/encoding" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" - "cosmossdk.io/store/v2/storage" -) - -const ( - // defaultChannelBufferSize is the default buffer size for the migration stream. - defaultChannelBufferSize = 1024 - // defaultStorageBufferSize is the default buffer size for the storage snapshotter. - defaultStorageBufferSize = 1024 - - migrateChangesetKeyFmt = "m/cs_%x" // m/cs_<version> -) - -// VersionedChangeset is a pair of version and Changeset. -type VersionedChangeset struct { - Version uint64 - Changeset *corestore.Changeset -} - -// Manager manages the migration of the whole state from store/v1 to store/v2. -type Manager struct { - logger log.Logger - snapshotsManager *snapshots.Manager - - stateStorage *storage.StorageStore - stateCommitment *commitment.CommitStore - - db corestore.KVStoreWithBatch - mtx sync.Mutex // mutex for migratedVersion - migratedVersion uint64 - - chChangeset <-chan *VersionedChangeset - chDone <-chan struct{} -} - -// NewManager returns a new Manager. -// -// NOTE: `sc` can be `nil` if you don't want to migrate the commitment. -func NewManager(db corestore.KVStoreWithBatch, sm *snapshots.Manager, ss *storage.StorageStore, sc *commitment.CommitStore, logger log.Logger) *Manager { - return &Manager{ - logger: logger, - snapshotsManager: sm, - stateStorage: ss, - stateCommitment: sc, - db: db, - } -} - -// Start starts the whole migration process. -// It migrates the whole state at the given version to the new store/v2 (both SC and SS). -// It also catches up the Changesets which are committed while the migration is in progress. -// `chChangeset` is the channel to receive the committed Changesets from the RootStore. -// `chDone` is the channel to receive the done signal from the RootStore. -// NOTE: It should be called by the RootStore, running in the background. -func (m *Manager) Start(version uint64, chChangeset <-chan *VersionedChangeset, chDone <-chan struct{}) error { - m.chChangeset = chChangeset - m.chDone = chDone - - go func() { - if err := m.writeChangeset(); err != nil { - m.logger.Error("failed to write changeset", "err", err) - } - }() - - if err := m.Migrate(version); err != nil { - return fmt.Errorf("failed to migrate state: %w", err) - } - - return m.Sync() -} - -// GetStateCommitment returns the state commitment.
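NewMetrics above does nothing more than validate that each global label is a {name, value} pair. A dependency-free sketch of that validation, with a local label type standing in for metrics.Label:

```go
package main

import "fmt"

type label struct{ Name, Value string }

// parseLabels mirrors NewMetrics: each entry must be a {name, value}
// pair; anything else is rejected.
func parseLabels(labels [][]string) ([]label, error) {
	out := make([]label, len(labels))
	for i, l := range labels {
		if len(l) != 2 {
			return nil, fmt.Errorf("invalid global label length; expected 2, got %d", len(l))
		}
		out[i] = label{Name: l[0], Value: l[1]}
	}
	return out, nil
}

func main() {
	fmt.Println(parseLabels([][]string{{"chain_id", "test-1"}, {"env", "dev"}})) // ok
	fmt.Println(parseLabels([][]string{{"oops"}}))                              // error
}
```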
-func (m *Manager) GetStateCommitment() *commitment.CommitStore { - return m.stateCommitment -} - -// Migrate migrates the whole state at the given height to the new store/v2. -func (m *Manager) Migrate(height uint64) error { - // create the migration stream and snapshot, - // which acts as protoio.Reader and snapshots.WriteCloser. - ms := NewMigrationStream(defaultChannelBufferSize) - - if err := m.snapshotsManager.CreateMigration(height, ms); err != nil { - return err - } - - // restore the snapshot - chStorage := make(chan *corestore.StateChanges, defaultStorageBufferSize) - - eg := new(errgroup.Group) - eg.Go(func() error { - return m.stateStorage.Restore(height, chStorage) - }) - eg.Go(func() error { - defer close(chStorage) - if m.stateCommitment != nil { - if _, err := m.stateCommitment.Restore(height, 0, ms, chStorage); err != nil { - return err - } - } else { // there is no commitment migration, just consume the stream to restore the state storage - var storeKey []byte - loop: - for { - snapshotItem := snapshotstypes.SnapshotItem{} - err := ms.ReadMsg(&snapshotItem) - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return fmt.Errorf("failed to read snapshot item: %w", err) - } - switch item := snapshotItem.Item.(type) { - case *snapshotstypes.SnapshotItem_Store: - storeKey = []byte(item.Store.Name) - case *snapshotstypes.SnapshotItem_IAVL: - if item.IAVL.Height == 0 { // only restore the leaf nodes - key := item.IAVL.Key - if key == nil { - key = []byte{} - } - value := item.IAVL.Value - if value == nil { - value = []byte{} - } - chStorage <- &corestore.StateChanges{ - Actor: storeKey, - StateChanges: []corestore.KVPair{ - { - Key: key, - Value: value, - }, - }, - } - } - default: - break loop - } - } - } - return nil - }) - - if err := eg.Wait(); err != nil { - return err - } - - m.mtx.Lock() - m.migratedVersion = height - m.mtx.Unlock() - - return nil -} - -// writeChangeset writes the Changeset to the db. -func (m *Manager) writeChangeset() error { - for vc := range m.chChangeset { - cs := vc.Changeset - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, vc.Version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csBytes, err := encoding.MarshalChangeset(cs) - if err != nil { - return fmt.Errorf("failed to marshal changeset: %w", err) - } - - batch := m.db.NewBatch() - // Invoking this code in a closure so that defer is called immediately on return - // yet not in the for-loop which can leave resource lingering. - err = func() error { - defer batch.Close() - - if err := batch.Set(csKey, csBytes); err != nil { - return fmt.Errorf("failed to write changeset to db.Batch: %w", err) - } - if err := batch.Write(); err != nil { - return fmt.Errorf("failed to write changeset to db: %w", err) - } - return nil - }() - if err != nil { - return err - } - } - - return nil -} - -// GetMigratedVersion returns the migrated version. -// It is used to check the migrated version in the RootStore. -func (m *Manager) GetMigratedVersion() uint64 { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.migratedVersion -} - -// Sync catches up the Changesets which are committed while the migration is in progress. -// It should be called after the migration is done. 
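writeChangeset above keys each buffered changeset by "m/cs_" plus the version as 8 big-endian bytes rendered in hex, so keys sort in version order in a byte-ordered KV store. A small stdlib-only sketch of the key layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// big-endian bytes mean the hex representation sorts lexicographically
	// in the same order as the version numbers themselves
	version := uint64(42)
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, version)
	key := fmt.Sprintf("m/cs_%x", buf)
	fmt.Println(key) // m/cs_000000000000002a
}
```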
-func (m *Manager) Sync() error { - version := m.GetMigratedVersion() - if version == 0 { - return fmt.Errorf("migration is not done yet") - } - version += 1 - - for { - select { - case <-m.chDone: - return nil - default: - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, version) - csKey := []byte(fmt.Sprintf(migrateChangesetKeyFmt, buf)) - csBytes, err := m.db.Get(csKey) - if err != nil { - return fmt.Errorf("failed to get changeset from db: %w", err) - } - if csBytes == nil { - // wait for the next changeset - time.Sleep(100 * time.Millisecond) - continue - } - - cs := corestore.NewChangeset() - if err := encoding.UnmarshalChangeset(cs, csBytes); err != nil { - return fmt.Errorf("failed to unmarshal changeset: %w", err) - } - if m.stateCommitment != nil { - if err := m.stateCommitment.WriteChangeset(cs); err != nil { - return fmt.Errorf("failed to write changeset to commitment: %w", err) - } - if _, err := m.stateCommitment.Commit(version); err != nil { - return fmt.Errorf("failed to commit changeset to commitment: %w", err) - } - } - if err := m.stateStorage.ApplyChangeset(version, cs); err != nil { - return fmt.Errorf("failed to write changeset to storage: %w", err) - } - - m.mtx.Lock() - m.migratedVersion = version - m.mtx.Unlock() - - version += 1 - } - } -} - -// Close closes the manager. It should be called after the migration is done. -// It will close the db and notify the snapshotsManager that the migration is done. -func (m *Manager) Close() error { - if err := m.db.Close(); err != nil { - return fmt.Errorf("failed to close db: %w", err) - } - if m.stateCommitment != nil { - m.snapshotsManager.EndMigration(m.stateCommitment) - } - - return nil -} diff --git a/store/v2/migration/manager_test.go b/store/v2/migration/manager_test.go deleted file mode 100644 index d8365ce075..0000000000 --- a/store/v2/migration/manager_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package migration - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" -) - -var storeKeys = []string{"store1", "store2"} - -func setupMigrationManager(t *testing.T, noCommitStore bool) (*Manager, *commitment.CommitStore) { - t.Helper() - - db := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) - } - - commitStore, err := commitment.NewCommitStore(multiTrees, db, coretesting.NewNopLogger()) - require.NoError(t, err) - - snapshotsStore, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - - snapshotsManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), commitStore, nil, nil, coretesting.NewNopLogger()) - - storageDB, err := pebbledb.New(t.TempDir()) - require.NoError(t, err) - newStorageStore := storage.NewStorageStore(storageDB, coretesting.NewNopLogger()) // for store/v2 - - db1 := dbm.NewMemDB() - multiTrees1 := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(db1, []byte(storeKey)) - multiTrees1[storeKey] = iavl.NewIavlTree(prefixDB, coretesting.NewNopLogger(), iavl.DefaultConfig()) - } - - 
newCommitStore, err := commitment.NewCommitStore(multiTrees1, db1, coretesting.NewNopLogger()) // for store/v2 - require.NoError(t, err) - if noCommitStore { - newCommitStore = nil - } - - return NewManager(db, snapshotsManager, newStorageStore, newCommitStore, coretesting.NewNopLogger()), commitStore -} - -func TestMigrateState(t *testing.T) { - for _, noCommitStore := range []bool{false, true} { - t.Run(fmt.Sprintf("Migrate noCommitStore=%v", noCommitStore), func(t *testing.T) { - m, orgCommitStore := setupMigrationManager(t, noCommitStore) - - // apply changeset - toVersion := uint64(100) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - require.NoError(t, orgCommitStore.WriteChangeset(cs)) - _, err := orgCommitStore.Commit(version) - require.NoError(t, err) - } - - err := m.Migrate(toVersion - 1) - require.NoError(t, err) - - // expecting error for conflicting process, since Migrate trigger snapshotter create migration, - // which start a snapshot process already. - _, err = m.snapshotsManager.Create(toVersion - 1) - require.Error(t, err) - - if m.stateCommitment != nil { - // check the migrated state - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateCommitment.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - // check the latest state - val, err := m.stateCommitment.Get([]byte("store1"), toVersion-1, []byte("key-100-1")) - require.NoError(t, err) - require.Nil(t, val) - val, err = m.stateCommitment.Get([]byte("store2"), toVersion-1, []byte("key-100-0")) - require.NoError(t, err) - require.Nil(t, val) - } - - // check the storage - for version := uint64(1); version < toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - val, err := m.stateStorage.Get([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", version, i))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("value-%d-%d", version, i)), val) - } - } - } - }) - } -} diff --git a/store/v2/migration/stream.go b/store/v2/migration/stream.go deleted file mode 100644 index aead5df348..0000000000 --- a/store/v2/migration/stream.go +++ /dev/null @@ -1,79 +0,0 @@ -package migration - -import ( - "fmt" - "io" - "sync/atomic" - - protoio "github.com/cosmos/gogoproto/io" - "github.com/cosmos/gogoproto/proto" - - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -var ( - _ snapshots.WriteCloser = (*MigrationStream)(nil) - _ protoio.ReadCloser = (*MigrationStream)(nil) -) - -// MigrationStream is a stream for migrating the whole IAVL state as a snapshot. -// It's used to sync the whole state from the store/v1 to store/v2. -// The main idea is to use the same snapshotter interface without writing to disk. -type MigrationStream struct { - chBuffer chan proto.Message - err atomic.Value // atomic error -} - -// NewMigrationStream returns a new MigrationStream. 
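The MigrationStream defined below is essentially a buffered channel dressed up as a writer/reader pair, with a closing error carried in an atomic.Value. Here is a simplified, stdlib-only sketch of the same idea, generic over the item type instead of proto.Message (and guarding against storing a nil error, which atomic.Value rejects):

```go
package main

import (
	"fmt"
	"io"
	"sync/atomic"
)

// memStream: a producer writes items into a buffered channel and a consumer
// reads them back; a closing error is surfaced once the channel drains.
type memStream[T any] struct {
	ch  chan T
	err atomic.Value
}

func newMemStream[T any](size int) *memStream[T] {
	return &memStream[T]{ch: make(chan T, size)}
}

func (s *memStream[T]) Write(v T) { s.ch <- v }

func (s *memStream[T]) CloseWithError(err error) {
	if err != nil { // atomic.Value panics on a nil store
		s.err.Store(err)
	}
	close(s.ch)
}

func (s *memStream[T]) Read() (T, error) {
	v, ok := <-s.ch
	if !ok {
		var zero T
		if err, _ := s.err.Load().(error); err != nil {
			return zero, err
		}
		return zero, io.EOF // clean close
	}
	return v, nil
}

func main() {
	s := newMemStream[string](4)
	go func() {
		s.Write("item-1")
		s.Write("item-2")
		s.CloseWithError(nil)
	}()
	for {
		v, err := s.Read()
		if err != nil {
			fmt.Println("done:", err)
			return
		}
		fmt.Println(v)
	}
}
```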
-func NewMigrationStream(chBufferSize int) *MigrationStream { - return &MigrationStream{ - chBuffer: make(chan proto.Message, chBufferSize), - } -} - -// WriteMsg implements protoio.Write interface. -func (ms *MigrationStream) WriteMsg(msg proto.Message) error { - ms.chBuffer <- msg - return nil -} - -// CloseWithError implements snapshots.WriteCloser interface. -func (ms *MigrationStream) CloseWithError(err error) { - ms.err.Store(err) - close(ms.chBuffer) -} - -// ReadMsg implements the protoio.Read interface. -// -// NOTE: Here we follow the pattern of snapshot.Restore; however, the migration is done in memory. -// It doesn't require any deserialization -- just passing the pointer to the msg. -func (ms *MigrationStream) ReadMsg(msg proto.Message) error { - // msg should be a pointer to the same type as the one written to the stream - snapshotsItem, ok := msg.(*snapshotstypes.SnapshotItem) - if !ok { - return fmt.Errorf("unexpected message type: %T", msg) - } - - // It doesn't require any deserialization, just a type assertion. - item := <-ms.chBuffer - if item == nil { - return io.EOF - } - - *snapshotsItem = *(item.(*snapshotstypes.SnapshotItem)) - - // check if there is an error from the writer. - err := ms.err.Load() - if err != nil { - return err.(error) - } - - return nil -} - -// Close implements io.Closer interface. -func (ms *MigrationStream) Close() error { - close(ms.chBuffer) - return nil -} diff --git a/store/v2/options.go b/store/v2/options.go deleted file mode 100644 index 9c20d84942..0000000000 --- a/store/v2/options.go +++ /dev/null @@ -1,83 +0,0 @@ -package store - -type PruningStrategy int - -const ( - // PruningDefault defines a pruning strategy where the last 362880 heights are - // kept, and to-be-pruned heights are pruned at every 10th height. - // The last 362880 heights are kept (approximately 3.5 weeks' worth of state) assuming the typical - // block time is 6s. If these values do not match the application's requirements, use the "custom" option. - PruningDefault PruningStrategy = iota - // PruningEverything defines a pruning strategy where all committed heights are - // deleted, storing only the current height and last 2 states. To-be-pruned heights are - // pruned at every 10th height. - PruningEverything - // PruningNothing defines a pruning strategy where all heights are kept on disk. - // This is the only strategy where KeepEvery=1 is allowed with state-sync snapshots disabled. - PruningNothing -) - -// PruningOption defines the pruning configuration. -type PruningOption struct { - // KeepRecent sets the number of recent versions to keep. - KeepRecent uint64 - - // Interval sets how often to prune. - // If set to 0, no pruning will be done. - Interval uint64 -} - -// NewPruningOption returns a new PruningOption instance based on the given pruning strategy. -func NewPruningOption(pruningStrategy PruningStrategy) *PruningOption { - switch pruningStrategy { - case PruningDefault: - return &PruningOption{ - KeepRecent: 362880, - Interval: 10, - } - case PruningEverything: - return &PruningOption{ - KeepRecent: 2, - Interval: 10, - } - case PruningNothing: - return &PruningOption{ - KeepRecent: 0, - Interval: 0, - } - default: - return nil - } -} - -// NewPruningOptionWithCustom returns a new PruningOption based on the given parameters.
-func NewPruningOptionWithCustom(keepRecent, interval uint64) *PruningOption { - return &PruningOption{ - KeepRecent: keepRecent, - Interval: interval, - } -} - -// ShouldPrune returns true if the given version should be pruned. -// If true, it also returns the version to prune up to. -// NOTE: The current version is not pruned. -func (opts *PruningOption) ShouldPrune(version uint64) (bool, uint64) { - if opts.Interval == 0 { - return false, 0 - } - - if version <= opts.KeepRecent { - return false, 0 - } - - if version%opts.Interval == 0 { - return true, version - opts.KeepRecent - 1 - } - - return false, 0 -} - -// DBOptions defines the interface of database options. -type DBOptions interface { - Get(string) interface{} -} diff --git a/store/v2/proof/commit_info.go b/store/v2/proof/commit_info.go deleted file mode 100644 index d95c152b46..0000000000 --- a/store/v2/proof/commit_info.go +++ /dev/null @@ -1,218 +0,0 @@ -package proof - -import ( - "bytes" - "fmt" - "sort" - "time" - - "cosmossdk.io/store/v2/internal/encoding" -) - -type ( - // CommitInfo defines commit information used by the multi-store when committing - // a version/height. - CommitInfo struct { - Version uint64 - StoreInfos []StoreInfo - Timestamp time.Time - CommitHash []byte - } - - // StoreInfo defines store-specific commit information. It contains a reference - // between a store name/key and the commit ID. - StoreInfo struct { - Name []byte - CommitID CommitID - Structure string - } - - // CommitID defines the commitment information when a specific store is - // committed. - CommitID struct { - Version uint64 - Hash []byte - } -) - -func (si StoreInfo) GetHash() []byte { - return si.CommitID.Hash -} - -// Hash returns the root hash of all committed stores represented by CommitInfo, -// sorted by store name/key. -func (ci *CommitInfo) Hash() []byte { - if len(ci.StoreInfos) == 0 { - return nil - } - - if len(ci.CommitHash) != 0 { - return ci.CommitHash - } - - rootHash, _, _ := ci.GetStoreProof([]byte{}) - return rootHash -} - -// GetStoreCommitID returns the CommitID for the given store key. -func (ci *CommitInfo) GetStoreCommitID(storeKey []byte) CommitID { - for _, si := range ci.StoreInfos { - if bytes.Equal(si.Name, storeKey) { - return si.CommitID - } - } - return CommitID{} -} - -// GetStoreProof takes in a storeKey and returns a proof of the store key in addition -// to the root hash it should be proved against. If an empty string is provided, the first -// store based on lexicographical ordering will be proved. -func (ci *CommitInfo) GetStoreProof(storeKey []byte) ([]byte, *CommitmentOp, error) { - sort.Slice(ci.StoreInfos, func(i, j int) bool { - return bytes.Compare(ci.StoreInfos[i].Name, ci.StoreInfos[j].Name) < 0 - }) - - index := 0 - leaves := make([][]byte, len(ci.StoreInfos)) - for i, si := range ci.StoreInfos { - var err error - leaves[i], err = LeafHash(si.Name, si.GetHash()) - if err != nil { - return nil, nil, err - } - if bytes.Equal(si.Name, storeKey) { - index = i - } - } - - rootHash, inners := ProofFromByteSlices(leaves, index) - commitmentOp := ConvertCommitmentOp(inners, storeKey, ci.StoreInfos[index].GetHash()) - - return rootHash, &commitmentOp, nil -} - -// encodedSize returns the encoded size of CommitInfo for preallocation in Marshal.
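ShouldPrune's rule is compact: never prune when Interval is 0 or the version is still within KeepRecent; otherwise prune only on every Interval-th version, down to version - KeepRecent - 1. A worked stdlib-only sketch:

```go
package main

import "fmt"

// shouldPrune reproduces the decision rule of PruningOption.ShouldPrune:
// keep the most recent KeepRecent versions plus the current one, and only
// act on every Interval-th version.
func shouldPrune(keepRecent, interval, version uint64) (bool, uint64) {
	if interval == 0 || version <= keepRecent {
		return false, 0
	}
	if version%interval == 0 {
		return true, version - keepRecent - 1
	}
	return false, 0
}

func main() {
	// default strategy: KeepRecent=362880, Interval=10
	fmt.Println(shouldPrune(362880, 10, 362890)) // true 9
	fmt.Println(shouldPrune(362880, 10, 362895)) // false 0 (not on the interval)
	fmt.Println(shouldPrune(2, 10, 100))         // "everything": true 97
}
```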
-func (ci *CommitInfo) encodedSize() int { - size := encoding.EncodeUvarintSize(ci.Version) - size += encoding.EncodeVarintSize(ci.Timestamp.UnixNano()) - size += encoding.EncodeUvarintSize(uint64(len(ci.StoreInfos))) - for _, storeInfo := range ci.StoreInfos { - size += encoding.EncodeBytesSize(storeInfo.Name) - size += encoding.EncodeBytesSize(storeInfo.CommitID.Hash) - size += encoding.EncodeBytesSize([]byte(storeInfo.Structure)) - } - return size -} - -// Marshal returns the encoded byte representation of CommitInfo. -// NOTE: CommitInfo is encoded as follows: -// - version (uvarint) -// - timestamp (varint) -// - number of stores (uvarint) -// - for each store: -// - store name (bytes) -// - store hash (bytes) -// - store commit structure (bytes) -func (ci *CommitInfo) Marshal() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(ci.encodedSize()) - - if err := encoding.EncodeUvarint(&buf, ci.Version); err != nil { - return nil, err - } - if err := encoding.EncodeVarint(&buf, ci.Timestamp.UnixNano()); err != nil { - return nil, err - } - if err := encoding.EncodeUvarint(&buf, uint64(len(ci.StoreInfos))); err != nil { - return nil, err - } - for _, si := range ci.StoreInfos { - if err := encoding.EncodeBytes(&buf, si.Name); err != nil { - return nil, err - } - if err := encoding.EncodeBytes(&buf, si.CommitID.Hash); err != nil { - return nil, err - } - if err := encoding.EncodeBytes(&buf, []byte(si.Structure)); err != nil { - return nil, err - } - } - - return buf.Bytes(), nil -} - -// Unmarshal unmarshals the encoded byte representation of CommitInfo. -func (ci *CommitInfo) Unmarshal(buf []byte) error { - // Version - version, n, err := encoding.DecodeUvarint(buf) - if err != nil { - return err - } - buf = buf[n:] - ci.Version = version - // Timestamp - timestamp, n, err := encoding.DecodeVarint(buf) - if err != nil { - return err - } - buf = buf[n:] - ci.Timestamp = time.Unix(timestamp/int64(time.Second), timestamp%int64(time.Second)) - // StoreInfos - storeInfosLen, n, err := encoding.DecodeUvarint(buf) - if err != nil { - return err - } - buf = buf[n:] - ci.StoreInfos = make([]StoreInfo, storeInfosLen) - for i := 0; i < int(storeInfosLen); i++ { - // Name - name, n, err := encoding.DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - ci.StoreInfos[i].Name = name - // CommitID - hash, n, err := encoding.DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - // Structure - structure, n, err := encoding.DecodeBytes(buf) - if err != nil { - return err - } - buf = buf[n:] - ci.StoreInfos[i].Structure = string(structure) - - ci.StoreInfos[i].CommitID = CommitID{ - Hash: hash, - Version: ci.Version, - } - } - - return nil -} - -func (ci *CommitInfo) CommitID() CommitID { - return CommitID{ - Version: ci.Version, - Hash: ci.Hash(), - } -} - -func (m *CommitInfo) GetVersion() uint64 { - if m != nil { - return m.Version - } - return 0 -} - -func (cid CommitID) String() string { - return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) -} - -func (cid CommitID) IsZero() bool { - return cid.Version == 0 && len(cid.Hash) == 0 -} diff --git a/store/v2/proof/commit_info_test.go b/store/v2/proof/commit_info_test.go deleted file mode 100644 index e09449c519..0000000000 --- a/store/v2/proof/commit_info_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package proof - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestGetStoreProof(t *testing.T) { - tests := []struct { - storeInfos []StoreInfo - }{ - {[]StoreInfo{ - 
{[]byte("key1"), CommitID{1, []byte("value1")}, "iavl"}, - }}, - {[]StoreInfo{ - {[]byte("key2"), CommitID{1, []byte("value2")}, "iavl"}, - {[]byte("key1"), CommitID{1, []byte("value1")}, "iavl"}, - }}, - {[]StoreInfo{ - {[]byte("key3"), CommitID{1, []byte("value3")}, "iavl"}, - {[]byte("key2"), CommitID{1, []byte("value2")}, "iavl"}, - {[]byte("key1"), CommitID{1, []byte("value1")}, "iavl"}, - }}, - {[]StoreInfo{ - {[]byte("key2"), CommitID{1, []byte("value2")}, "iavl"}, - {[]byte("key1"), CommitID{1, []byte("value1")}, "iavl"}, - {[]byte("key3"), CommitID{1, []byte("value3")}, "iavl"}, - }}, - {[]StoreInfo{ - {[]byte("key4"), CommitID{1, []byte("value4")}, "iavl"}, - {[]byte("key1"), CommitID{1, []byte("value1")}, "iavl"}, - {[]byte("key3"), CommitID{1, []byte("value3")}, "iavl"}, - {[]byte("key2"), CommitID{1, []byte("value2")}, "iavl"}, - }}, - } - - for i, tc := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - // create a commit info - ci := CommitInfo{ - Version: 1, - Timestamp: time.Now(), - StoreInfos: tc.storeInfos, - } - commitHash := ci.Hash() - // make sure the store infos are sorted - require.Equal(t, ci.StoreInfos[0].Name, []byte("key1")) - for _, si := range tc.storeInfos { - // get the proof - _, proof, err := ci.GetStoreProof(si.Name) - require.NoError(t, err, "test case %d", i) - // verify the proof - expRoots, err := proof.Run([][]byte{si.CommitID.Hash}) - require.NoError(t, err, "test case %d", i) - require.Equal(t, commitHash, expRoots[0], "test case %d", i) - - bz, err := ci.Marshal() - require.NoError(t, err) - var ci2 CommitInfo - err = ci2.Unmarshal(bz) - require.NoError(t, err) - require.True(t, ci.Timestamp.Equal(ci2.Timestamp)) - ci2.Timestamp = ci.Timestamp - require.Equal(t, ci, ci2) - } - }) - } -} diff --git a/store/v2/proof/proof.go b/store/v2/proof/proof.go deleted file mode 100644 index 120cf3d2a1..0000000000 --- a/store/v2/proof/proof.go +++ /dev/null @@ -1,230 +0,0 @@ -package proof - -import ( - "crypto/sha256" - - ics23 "github.com/cosmos/ics23/go" - - "cosmossdk.io/errors" - storeerrors "cosmossdk.io/store/v2/errors" -) - -// Proof operation types -const ( - ProofOpIAVLCommitment = "ics23:iavl" - ProofOpSimpleMerkleCommitment = "ics23:simple" - ProofOpSMTCommitment = "ics23:smt" -) - -var ( - leafPrefix = []byte{0} - innerPrefix = []byte{1} - - // SimpleMerkleSpec is the ics23 proof spec for simple merkle proofs. - SimpleMerkleSpec = &ics23.ProofSpec{ - LeafSpec: &ics23.LeafOp{ - Prefix: leafPrefix, - PrehashKey: ics23.HashOp_NO_HASH, - PrehashValue: ics23.HashOp_SHA256, - Hash: ics23.HashOp_SHA256, - Length: ics23.LengthOp_VAR_PROTO, - }, - InnerSpec: &ics23.InnerSpec{ - ChildOrder: []int32{0, 1}, - MinPrefixLength: 1, - MaxPrefixLength: 1, - ChildSize: 32, - Hash: ics23.HashOp_SHA256, - }, - } -) - -// CommitmentOp implements merkle.ProofOperator by wrapping an ics23 CommitmentProof. -// It also contains a Key field to determine which key the proof is proving. -// NOTE: CommitmentProof currently can either be ExistenceProof or NonexistenceProof -// -// Type and Spec are classified by the kind of merkle proof it represents allowing -// the code to be reused by more types. Spec is never on the wire, but mapped -// from type in the code. 
-type CommitmentOp struct { - Type string - Key []byte - Spec *ics23.ProofSpec - Proof *ics23.CommitmentProof -} - -func NewIAVLCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpIAVLCommitment, - Spec: ics23.IavlSpec, - Key: key, - Proof: proof, - } -} - -func NewSimpleMerkleCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSimpleMerkleCommitment, - Spec: SimpleMerkleSpec, - Key: key, - Proof: proof, - } -} - -func NewSMTCommitmentOp(key []byte, proof *ics23.CommitmentProof) CommitmentOp { - return CommitmentOp{ - Type: ProofOpSMTCommitment, - Spec: ics23.SmtSpec, - Key: key, - Proof: proof, - } -} - -func (op CommitmentOp) GetKey() []byte { - return op.Key -} - -// Run takes in a list of arguments and attempts to run the proof op against these -// arguments. Returns the root wrapped in [][]byte if the proof op succeeds with -// given args. If not, it will return an error. -// -// CommitmentOp will accept args of length 1 or length 0. If length 1 args is -// passed in, then CommitmentOp will attempt to prove the existence of the key -// with the value provided by args[0] using the embedded CommitmentProof and returns -// the CommitmentRoot of the proof. If length 0 args is passed in, then CommitmentOp -// will attempt to prove the absence of the key in the CommitmentOp and return the -// CommitmentRoot of the proof. -func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) { - // calculate root from proof - root, err := op.Proof.Calculate() - if err != nil { - return nil, errors.Wrapf(storeerrors.ErrInvalidProof, "could not calculate root for proof: %v", err) - } - - // Only support an existence proof or nonexistence proof (batch proofs currently unsupported) - switch len(args) { - case 0: - // Args are nil, so we verify the absence of the key. - absent := ics23.VerifyNonMembership(op.Spec, root, op.Proof, op.Key) - if !absent { - return nil, errors.Wrapf(storeerrors.ErrInvalidProof, "proof did not verify absence of key: %s", string(op.Key)) - } - - case 1: - // Args is length 1, verify existence of key with value args[0] - if !ics23.VerifyMembership(op.Spec, root, op.Proof, op.Key, args[0]) { - return nil, errors.Wrapf(storeerrors.ErrInvalidProof, "proof did not verify existence of key %s with given value %x", op.Key, args[0]) - } - - default: - return nil, errors.Wrapf(storeerrors.ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args)) - } - - return [][]byte{root}, nil -} - -// ProofFromByteSlices computes the proof from the given leaves. An iteration will be -// performed for each level of the tree, where each iteration hashes together the bottom most -// nodes. If the length of the bottom most nodes is odd, then the last node will be saved -// for the next iteration. -// -// Example: -// Iteration 1: -// n = 5 -// leaves = a, b, c, d, e. -// index = 2 (prove c) -// -// Iteration 2: -// n = 3 -// leaves = ab, cd, e -// index = 1 (prove c, so index of cd) -// -// Iteration 3: -// n = 2 -// leaves = abcd, e -// index = 0 (prove c, so index of abcd) -// -// Final iteration: -// n = 1 -// leaves = abcde -// index = 0 -// -// The bitwise & operator allows us to determine if the index or length is odd or even. -// The bitwise ^ operator allows us to increment when the value is even and decrement when it is odd. 
-func ProofFromByteSlices(leaves [][]byte, index int) (rootHash []byte, inners []*ics23.InnerOp) { - if len(leaves) == 0 { - return emptyHash(), nil - } - - n := len(leaves) - for n > 1 { - // Begin by constructing the proof for the inner node of the requested index. - // A proof of the inner node is skipped only in the case where the requested index - // is the last element and it does not have a leaf pair (resulting in it being - // saved until the next iteration). - if index < n-1 || index&1 == 1 { - inner := &ics23.InnerOp{Hash: ics23.HashOp_SHA256} - // If proof index is even then child is from left, suffix is populated - // otherwise, child is from right and the prefix is populated. - if index&1 == 0 { - // inner op(prefix=0x01 | child | suffix=leaves[index+1]) - inner.Prefix = innerPrefix - inner.Suffix = leaves[index^1] // XOR op is index+1 because index is even - } else { - // inner op(prefix=0x01 | leaves[index-1] | child | suffix=nil) - inner.Prefix = append(innerPrefix, leaves[index^1]...) // XOR op is index-1 because index is odd - } - inners = append(inners, inner) - } - - // hash together all leaf pairs - for i := 0; i < n/2; i++ { - leaves[i] = InnerHash(leaves[2*i], leaves[2*i+1]) - } - - // save any leftover leaf for the next iteration - if n&1 == 1 { - leaves[n/2] = leaves[n-1] - } - n = (n + 1) / 2 // n + 1 accounts for any leaves which are added to the next iteration - index /= 2 - } - - rootHash = leaves[0] - return rootHash, inners -} - -// ConvertCommitmentOp converts the given merkle proof into an CommitmentOp. -func ConvertCommitmentOp(inners []*ics23.InnerOp, key, value []byte) CommitmentOp { - return NewSimpleMerkleCommitmentOp(key, &ics23.CommitmentProof{ - Proof: &ics23.CommitmentProof_Exist{ - Exist: &ics23.ExistenceProof{ - Key: key, - Value: value, - Leaf: SimpleMerkleSpec.LeafSpec, - Path: inners, - }, - }, - }) -} - -func emptyHash() []byte { - h := sha256.Sum256([]byte{}) - return h[:] -} - -// LeafHash computes the hash of a leaf node. 
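Verifying a proof built by ProofFromByteSlices is just repeated InnerHash applications from the leaf up, which is what CommitmentOp.Run does via proof.Calculate(). A stdlib-only sketch with placeholder leaf hashes (real leaves come from LeafHash) that rebuilds the root of a three-leaf tree and checks the proof for the middle leaf:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

var innerPrefix = []byte{1}

// innerHash matches InnerHash: SHA256(0x01 || left || right).
func innerHash(left, right []byte) []byte {
	h := sha256.Sum256(append(append(append([]byte{}, innerPrefix...), left...), right...))
	return h[:]
}

// recompute walks (prefix, suffix) inner ops from the leaf up to the root.
func recompute(leaf []byte, inners [][2][]byte) []byte {
	h := leaf
	for _, op := range inners {
		data := append(append(append([]byte{}, op[0]...), h...), op[1]...)
		sum := sha256.Sum256(data)
		h = sum[:]
	}
	return h
}

func main() {
	// three placeholder leaf hashes standing in for LeafHash output
	a, b, c := sha256.Sum256([]byte("a")), sha256.Sum256([]byte("b")), sha256.Sum256([]byte("c"))

	// root over (a, b, c): hash pairs left to right; the odd leaf carries over
	ab := innerHash(a[:], b[:])
	root := innerHash(ab, c[:])

	// proof for b: first op has prefix 0x01||a (b is a right child),
	// second op has suffix c (ab is a left child)
	inners := [][2][]byte{
		{append(append([]byte{}, innerPrefix...), a[:]...), nil},
		{innerPrefix, c[:]},
	}
	fmt.Println(bytes.Equal(recompute(b[:], inners), root)) // true
}
```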
-func LeafHash(key, value []byte) ([]byte, error) { - return SimpleMerkleSpec.LeafSpec.Apply(key, value) -} - -// InnerHash computes the hash of an inner node as defined by ics23: -// https://github.com/cosmos/ics23/blob/go/v0.10.0/proto/cosmos/ics23/v1/proofs.proto#L130 -func InnerHash(left, right []byte) []byte { - data := make([]byte, len(innerPrefix)+len(left)+len(right)) - n := copy(data, innerPrefix) - n += copy(data[n:], left) - copy(data[n:], right) - h := sha256.Sum256(data) - return h[:] -} diff --git a/store/v2/proof/proof_test.go b/store/v2/proof/proof_test.go deleted file mode 100644 index 57f2525cab..0000000000 --- a/store/v2/proof/proof_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package proof - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestProofFromBytesSlices(t *testing.T) { - tests := []struct { - keys []string - values []string - want string - }{ - {[]string{}, []string{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, - {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, - // swap order with 2 keys - { - []string{"key1", "key2"}, - []string{"value1", "value2"}, - "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3", - }, - { - []string{"key2", "key1"}, - []string{"value2", "value1"}, - "55d4bce1c53b7d394bd41bbfc2b239cc2e1c7e36423612a97181c47e79bb713c", - }, - // swap order with 3 keys - { - []string{"key1", "key2", "key3"}, - []string{"value1", "value2", "value3"}, - "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc", - }, - { - []string{"key1", "key3", "key2"}, - []string{"value1", "value3", "value2"}, - "443382fbb629e0d50e86d6ea49e22aa4e27ba50262730b0122cec36860c903a2", - }, - } - for i, tc := range tests { - var err error - leaves := make([][]byte, len(tc.keys)) - for j, key := range tc.keys { - leaves[j], err = LeafHash([]byte(key), []byte(tc.values[j])) - require.NoError(t, err) - } - for j := range leaves { - buf := make([][]byte, len(leaves)) - copy(buf, leaves) - rootHash, inners := ProofFromByteSlices(buf, j) - require.Equal(t, tc.want, fmt.Sprintf("%x", rootHash), "test case %d", i) - commitmentOp := ConvertCommitmentOp(inners, []byte(tc.keys[j]), []byte(tc.values[j])) - expRoots, err := commitmentOp.Run([][]byte{[]byte(tc.values[j])}) - require.NoError(t, err) - require.Equal(t, tc.want, fmt.Sprintf("%x", expRoots[0]), "test case %d", i) - } - - } -} diff --git a/store/v2/pruning/README.md b/store/v2/pruning/README.md deleted file mode 100644 index fbf24130c6..0000000000 --- a/store/v2/pruning/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Pruning Manager - -The `pruning` package defines the `PruningManager` struct which is responsible for -pruning the state storage (SS) and the state commitment (SC) based on the current -height of the chain. The `PruningOption` struct defines the configuration for pruning -and is passed to the `PruningManager` during initialization. - -## Prune Options - -The `PruningOption` struct includes the following fields: - -* `KeepRecent` (uint64): The number of recent heights to keep in the state. -* `Interval` (uint64): The interval of how often to prune the state. 0 means no pruning. - -## Pausable Pruner - -The `PausablePruner` interface defines the `PausePruning` method, which is used to pause -the pruning process. 
The `PruningManager` will check if the pruner is a `PausablePruner` -and call the `PausePruning` method before and after `Commit` to pause and resume pruning. -This is useful when the pruning process is asynchronous and needs to be paused during -a commit to prevent parallel writes. - -## Pruning Flow - -```mermaid -sequenceDiagram - autonumber - - participant A as RootStore - participant B as PruningManager - participant C as CommitmentStore - participant D as StorageStore - - loop Commit - A->>B: SignalCommit(true, height) - alt SC is PausablePruner - B->>C: PausePruning(true) - else SS is PausablePruner - B->>D: PausePruning(true) - end - A->>C: Commit Changeset - A->>D: Write Changeset - A->>B: SignalCommit(false, height) - alt SC is PausablePruner - B->>C: PausePruning(false) - else SS is PausablePruner - B->>D: PausePruning(false) - end - B->>C: Prune(height) - B->>D: Prune(height) - end -``` diff --git a/store/v2/pruning/manager.go b/store/v2/pruning/manager.go deleted file mode 100644 index 424805bde7..0000000000 --- a/store/v2/pruning/manager.go +++ /dev/null @@ -1,69 +0,0 @@ -package pruning - -import "cosmossdk.io/store/v2" - -// Manager is a struct that manages the pruning of old versions of the SC and SS. -type Manager struct { - // scPruner is the pruner for the SC. - scPruner store.Pruner - // scPruningOption are the pruning options for the SC. - scPruningOption *store.PruningOption - // ssPruner is the pruner for the SS. - ssPruner store.Pruner - // ssPruningOption are the pruning options for the SS. - ssPruningOption *store.PruningOption -} - -// NewManager creates a new Pruning Manager. -func NewManager(scPruner, ssPruner store.Pruner, scPruningOption, ssPruningOption *store.PruningOption) *Manager { - return &Manager{ - scPruner: scPruner, - scPruningOption: scPruningOption, - ssPruner: ssPruner, - ssPruningOption: ssPruningOption, - } -} - -// Prune prunes the SC and SS to the provided version. -// -// NOTE: It can be called outside of the store manually. -func (m *Manager) Prune(version uint64) error { - // Prune the SC. - if m.scPruningOption != nil { - if prune, pruneTo := m.scPruningOption.ShouldPrune(version); prune { - if err := m.scPruner.Prune(pruneTo); err != nil { - return err - } - } - } - - // Prune the SS. - if m.ssPruningOption != nil { - if prune, pruneTo := m.ssPruningOption.ShouldPrune(version); prune { - if err := m.ssPruner.Prune(pruneTo); err != nil { - return err - } - } - } - - return nil -} - -// SignalCommit signals to the manager that a commit has started or finished. -// It is used to trigger the pruning of the SC and SS. -// It pauses or resumes the pruning of the SC and SS if the pruner implements -// the PausablePruner interface.
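The PausablePruner contract described in the README above can be satisfied by deferring prune requests that arrive while a commit is in flight. A stdlib-only sketch (the actual store.Pruner/PausablePruner interfaces live in store/v2 and are not shown in this diff):

```go
package main

import (
	"fmt"
	"sync"
)

// pausablePruner remembers prune requests that arrive while paused and
// flushes them when pruning resumes, so pruning never overlaps a commit.
type pausablePruner struct {
	mu      sync.Mutex
	paused  bool
	pending uint64 // highest prune-to version deferred while paused
}

func (p *pausablePruner) Prune(version uint64) error {
	p.mu.Lock()
	if p.paused {
		if version > p.pending {
			p.pending = version
		}
		p.mu.Unlock()
		return nil
	}
	p.mu.Unlock()
	fmt.Println("pruning up to version", version) // real deletion would go here
	return nil
}

func (p *pausablePruner) PausePruning(pause bool) {
	p.mu.Lock()
	p.paused = pause
	var deferred uint64
	if !pause {
		deferred, p.pending = p.pending, 0
	}
	p.mu.Unlock()
	if deferred > 0 {
		_ = p.Prune(deferred) // flush the request deferred during the commit
	}
}

func main() {
	p := &pausablePruner{}
	p.PausePruning(true)  // SignalCommit(true, ...): commit starts
	_ = p.Prune(9)        // deferred: a commit is in flight
	p.PausePruning(false) // SignalCommit(false, ...): flushes the prune
}
```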
-func (m *Manager) SignalCommit(start bool, version uint64) error { - if scPausablePruner, ok := m.scPruner.(store.PausablePruner); ok { - scPausablePruner.PausePruning(start) - } - if ssPausablePruner, ok := m.ssPruner.(store.PausablePruner); ok { - ssPausablePruner.PausePruning(start) - } - - if !start { - return m.Prune(version) - } - - return nil -} diff --git a/store/v2/pruning/manager_test.go b/store/v2/pruning/manager_test.go deleted file mode 100644 index 607af22bc7..0000000000 --- a/store/v2/pruning/manager_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package pruning - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" -) - -var storeKeys = []string{"store1", "store2", "store3"} - -type PruningManagerTestSuite struct { - suite.Suite - - manager *Manager - sc *commitment.CommitStore - ss *storage.StorageStore -} - -func TestPruningManagerTestSuite(t *testing.T) { - suite.Run(t, &PruningManagerTestSuite{}) -} - -func (s *PruningManagerTestSuite) SetupTest() { - nopLog := coretesting.NewNopLogger() - var err error - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) - } - s.sc, err = commitment.NewCommitStore(multiTrees, mdb, nopLog) - s.Require().NoError(err) - - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - s.ss = storage.NewStorageStore(sqliteDB, nopLog) - scPruningOption := store.NewPruningOptionWithCustom(0, 1) // prune all - ssPruningOption := store.NewPruningOptionWithCustom(5, 10) // prune some - s.manager = NewManager(s.sc, s.ss, scPruningOption, ssPruningOption) -} - -func (s *PruningManagerTestSuite) TestPrune() { - // commit changesets with pruning - toVersion := uint64(100) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - s.Require().NoError(s.sc.WriteChangeset(cs)) - _, err := s.sc.Commit(version) - s.Require().NoError(err) - - s.Require().NoError(s.ss.ApplyChangeset(version, cs)) - - s.Require().NoError(s.manager.Prune(version)) - } - - // wait for the pruning to finish in the commitment store - checkSCPrune := func() bool { - count := 0 - for _, storeKey := range storeKeys { - _, err := s.sc.GetProof([]byte(storeKey), toVersion-1, []byte(fmt.Sprintf("key-%d-%d", toVersion-1, 0))) - if err != nil { - count++ - } - } - - return count == len(storeKeys) - } - s.Require().Eventually(checkSCPrune, 10*time.Second, 1*time.Second) - - // check the storage store - _, pruneVersion := s.manager.ssPruningOption.ShouldPrune(toVersion) - for version := uint64(1); version <= toVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - key := []byte(fmt.Sprintf("key-%d-%d", version, i)) - value, err := s.ss.Get([]byte(storeKey), version, key) - if version <= pruneVersion { - s.Require().Nil(value) - 
s.Require().Error(err) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), value) - } - } - } - } -} - -func TestPruningOption(t *testing.T) { - testCases := []struct { - name string - options *store.PruningOption - version uint64 - pruning bool - pruneVersion uint64 - }{ - { - name: "no pruning", - options: store.NewPruningOptionWithCustom(100, 0), - version: 100, - pruning: false, - pruneVersion: 0, - }, - { - name: "prune all", - options: store.NewPruningOptionWithCustom(0, 1), - version: 19, - pruning: true, - pruneVersion: 18, - }, - { - name: "prune none", - options: store.NewPruningOptionWithCustom(100, 10), - version: 19, - pruning: false, - pruneVersion: 0, - }, - { - name: "prune some", - options: store.NewPruningOptionWithCustom(10, 50), - version: 100, - pruning: true, - pruneVersion: 89, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - pruning, pruneVersion := tc.options.ShouldPrune(tc.version) - require.Equal(t, tc.pruning, pruning) - require.Equal(t, tc.pruneVersion, pruneVersion) - }) - } -} diff --git a/store/v2/root/factory.go b/store/v2/root/factory.go deleted file mode 100644 index 7139f48abf..0000000000 --- a/store/v2/root/factory.go +++ /dev/null @@ -1,126 +0,0 @@ -package root - -import ( - "errors" - "fmt" - "os" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - "cosmossdk.io/store/v2/commitment/mem" - "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/internal" - "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/sqlite" -) - -type ( - SSType int - SCType int -) - -const ( - SSTypeSQLite SSType = 0 - SSTypePebble SSType = 1 - SSTypeRocks SSType = 2 - SCTypeIavl SCType = 0 - SCTypeIavlV2 SCType = 1 -) - -type FactoryOptions struct { - Logger log.Logger - RootDir string - SSType SSType - SCType SCType - SSPruningOption *store.PruningOption - SCPruningOption *store.PruningOption - IavlConfig *iavl.Config - StoreKeys []string - SCRawDB corestore.KVStoreWithBatch -} - -// CreateRootStore is a convenience function to create a root store based on the -// provided FactoryOptions. Strictly speaking app developers can create the root -// store directly by calling root.New, so this function is not -// necessary, but demonstrates the required steps and configuration to create a root store. 
-func CreateRootStore(opts *FactoryOptions) (store.RootStore, error) { - var ( - ssDb storage.Database - ss *storage.StorageStore - sc *commitment.CommitStore - err error - ensureDir = func(dir string) error { - if err := os.MkdirAll(dir, 0o0755); err != nil { - return fmt.Errorf("failed to create directory %s: %w", dir, err) - } - return nil - } - ) - - switch opts.SSType { - case SSTypeSQLite: - dir := fmt.Sprintf("%s/data/ss/sqlite", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = sqlite.New(dir) - case SSTypePebble: - dir := fmt.Sprintf("%s/data/ss/pebble", opts.RootDir) - if err = ensureDir(dir); err != nil { - return nil, err - } - ssDb, err = pebbledb.New(dir) - case SSTypeRocks: - // TODO: rocksdb requires build tags so is not supported here by default - return nil, errors.New("rocksdb not supported") - } - if err != nil { - return nil, err - } - ss = storage.NewStorageStore(ssDb, opts.Logger) - - if len(opts.StoreKeys) == 0 { - metadata := commitment.NewMetadataStore(opts.SCRawDB) - latestVersion, err := metadata.GetLatestVersion() - if err != nil { - return nil, err - } - lastCommitInfo, err := metadata.GetCommitInfo(latestVersion) - if err != nil { - return nil, err - } - if lastCommitInfo == nil { - return nil, fmt.Errorf("tried to construct a root store with no store keys specified but no commit info found for version %d", latestVersion) - } - for _, si := range lastCommitInfo.StoreInfos { - opts.StoreKeys = append(opts.StoreKeys, string(si.Name)) - } - } - - trees := make(map[string]commitment.Tree) - for _, key := range opts.StoreKeys { - if internal.IsMemoryStoreKey(key) { - trees[key] = mem.New() - } else { - switch opts.SCType { - case SCTypeIavl: - trees[key] = iavl.NewIavlTree(db.NewPrefixDB(opts.SCRawDB, []byte(key)), opts.Logger, opts.IavlConfig) - case SCTypeIavlV2: - return nil, errors.New("iavl v2 not supported") - } - } - } - sc, err = commitment.NewCommitStore(trees, opts.SCRawDB, opts.Logger) - if err != nil { - return nil, err - } - - pm := pruning.NewManager(sc, ss, opts.SCPruningOption, opts.SSPruningOption) - - return New(opts.Logger, ss, sc, pm, nil, nil) -} diff --git a/store/v2/root/migrate_test.go b/store/v2/root/migrate_test.go deleted file mode 100644 index 4d1b62b878..0000000000 --- a/store/v2/root/migrate_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package root - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/log" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/migration" - "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" -) - -var storeKeys = []string{"store1", "store2", "store3"} - -type MigrateStoreTestSuite struct { - suite.Suite - - rootStore store.RootStore -} - -func TestMigrateStoreTestSuite(t *testing.T) { - suite.Run(t, &MigrateStoreTestSuite{}) -} - -func (s *MigrateStoreTestSuite) SetupTest() { - testLog := log.NewTestLogger(s.T()) - nopLog := coretesting.NewNopLogger() - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, nopLog, iavl.DefaultConfig()) - } - orgSC, err := 
commitment.NewCommitStore(multiTrees, mdb, testLog) - s.Require().NoError(err) - - // apply changeset against the original store - toVersion := uint64(200) - keyCount := 10 - for version := uint64(1); version <= toVersion; version++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - s.Require().NoError(orgSC.WriteChangeset(cs)) - _, err = orgSC.Commit(version) - s.Require().NoError(err) - } - - // create a new storage and commitment stores - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, testLog) - - multiTrees1 := make(map[string]commitment.Tree) - for _, storeKey := range storeKeys { - multiTrees1[storeKey] = iavl.NewIavlTree(dbm.NewMemDB(), nopLog, iavl.DefaultConfig()) - } - sc, err := commitment.NewCommitStore(multiTrees1, dbm.NewMemDB(), testLog) - s.Require().NoError(err) - - snapshotsStore, err := snapshots.NewStore(s.T().TempDir()) - s.Require().NoError(err) - snapshotManager := snapshots.NewManager(snapshotsStore, snapshots.NewSnapshotOptions(1500, 2), orgSC, nil, nil, testLog) - migrationManager := migration.NewManager(dbm.NewMemDB(), snapshotManager, ss, sc, testLog) - pm := pruning.NewManager(sc, ss, nil, nil) - - // assume no storage store, simulate the migration process - s.rootStore, err = New(testLog, ss, orgSC, pm, migrationManager, nil) - s.Require().NoError(err) -} - -func (s *MigrateStoreTestSuite) TestMigrateState() { - err := s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - originalLatestVersion, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - // check if the Query fallback to the original SC - for version := uint64(1); version <= originalLatestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < 10; i++ { - res, err := s.rootStore.Query([]byte(storeKey), version, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // continue to apply changeset against the original store - latestVersion := originalLatestVersion + 1 - keyCount := 10 - for ; latestVersion < 2*originalLatestVersion; latestVersion++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", latestVersion, i)), []byte(fmt.Sprintf("value-%d-%d", latestVersion, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - - // check if the migration is completed - ver, err := s.rootStore.GetStateStorage().GetLatestVersion() - s.Require().NoError(err) - if ver == latestVersion { - break - } - - // add some delay to simulate the consensus process - time.Sleep(100 * time.Millisecond) - } - - // check if the migration is successful - version, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(latestVersion, version) - - // query against the migrated store - for version := uint64(1); version <= latestVersion; version++ { - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - targetVersion := version - if version < originalLatestVersion { - targetVersion = originalLatestVersion - } - res, err := s.rootStore.Query([]byte(storeKey), targetVersion, []byte(fmt.Sprintf("key-%d-%d", version, i)), true) - 
s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("value-%d-%d", version, i)), res.Value) - } - } - } - - // apply changeset against the migrated store - for version := latestVersion + 1; version <= latestVersion+10; version++ { - cs := corestore.NewChangeset() - for _, storeKey := range storeKeys { - for i := 0; i < keyCount; i++ { - cs.Add([]byte(storeKey), []byte(fmt.Sprintf("key-%d-%d", version, i)), []byte(fmt.Sprintf("value-%d-%d", version, i)), false) - } - } - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - } - - version, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(latestVersion+10, version) -} diff --git a/store/v2/root/reader.go b/store/v2/root/reader.go deleted file mode 100644 index 39737f8122..0000000000 --- a/store/v2/root/reader.go +++ /dev/null @@ -1,71 +0,0 @@ -package root - -import ( - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -var ( - _ corestore.Reader = (*Reader)(nil) - _ corestore.ReaderMap = (*ReaderMap)(nil) -) - -// ReaderMap defines an adapter around a RootStore that only exposes read-only -// operations. This is useful for exposing a read-only view of the RootStore at -// a specific version in history, which could also be the latest state. -type ReaderMap struct { - rootStore store.RootStore - version uint64 -} - -func NewReaderMap(v uint64, rs store.RootStore) *ReaderMap { - return &ReaderMap{ - rootStore: rs, - version: v, - } -} - -func (roa *ReaderMap) GetReader(actor []byte) (corestore.Reader, error) { - return NewReader(roa.version, roa.rootStore, actor), nil -} - -// Reader represents a read-only adapter for accessing data from the root store. -type Reader struct { - version uint64 // The version of the data. - rootStore store.RootStore // The root store to read data from. - actor []byte // The actor associated with the data. -} - -func NewReader(v uint64, rs store.RootStore, actor []byte) *Reader { - return &Reader{ - version: v, - rootStore: rs, - actor: actor, - } -} - -func (roa *Reader) Has(key []byte) (bool, error) { - val, err := roa.rootStore.GetStateStorage().Has(roa.actor, roa.version, key) - if err != nil { - return false, err - } - - return val, nil -} - -func (roa *Reader) Get(key []byte) ([]byte, error) { - result, err := roa.rootStore.GetStateStorage().Get(roa.actor, roa.version, key) - if err != nil { - return nil, err - } - - return result, nil -} - -func (roa *Reader) Iterator(start, end []byte) (corestore.Iterator, error) { - return roa.rootStore.GetStateStorage().Iterator(roa.actor, roa.version, start, end) -} - -func (roa *Reader) ReverseIterator(start, end []byte) (corestore.Iterator, error) { - return roa.rootStore.GetStateStorage().ReverseIterator(roa.actor, roa.version, start, end) -} diff --git a/store/v2/root/store.go b/store/v2/root/store.go deleted file mode 100644 index 55e55fa638..0000000000 --- a/store/v2/root/store.go +++ /dev/null @@ -1,469 +0,0 @@ -package root - -import ( - "bytes" - "crypto/sha256" - "errors" - "fmt" - "sync" - "time" - - "golang.org/x/sync/errgroup" - - coreheader "cosmossdk.io/core/header" - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/metrics" - "cosmossdk.io/store/v2/migration" - "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/pruning" -) - -var _ store.RootStore = (*Store)(nil) - -// Store defines the SDK's default RootStore implementation. 
It contains a single
-// State Storage (SS) backend and a single State Commitment (SC) backend. The SC
-// backend may or may not support multiple store keys and is implementation
-// dependent.
-type Store struct {
-	logger         corelog.Logger
-	initialVersion uint64
-
-	// stateStorage reflects the state storage backend
-	stateStorage store.VersionedDatabase
-
-	// stateCommitment reflects the state commitment (SC) backend
-	stateCommitment store.Committer
-
-	// commitHeader reflects the header used when committing state
-	// note, this isn't required and is only used for query purposes
-	commitHeader *coreheader.Info
-
-	// lastCommitInfo reflects the last version/hash that has been committed
-	lastCommitInfo *proof.CommitInfo
-
-	// telemetry reflects a telemetry agent responsible for emitting metrics (if any)
-	telemetry metrics.StoreMetrics
-
-	// pruningManager reflects the pruning manager used to prune state of the SS and SC backends
-	pruningManager *pruning.Manager
-
-	// Migration related fields
-	// migrationManager reflects the migration manager used to migrate state from v1 to v2
-	migrationManager *migration.Manager
-	// chChangeset reflects the channel used to send the changeset to the migration manager
-	chChangeset chan *migration.VersionedChangeset
-	// chDone reflects the channel used to signal the migration manager that the migration
-	// is done
-	chDone chan struct{}
-	// isMigrating reflects whether the store is currently migrating
-	isMigrating bool
-}
-
-// New creates a new root Store instance.
-//
-// NOTE: The migration manager is optional and can be nil if no migration is required.
-func New(
-	logger corelog.Logger,
-	ss store.VersionedDatabase,
-	sc store.Committer,
-	pm *pruning.Manager,
-	mm *migration.Manager,
-	m metrics.StoreMetrics,
-) (store.RootStore, error) {
-	return &Store{
-		logger:           logger,
-		initialVersion:   1,
-		stateStorage:     ss,
-		stateCommitment:  sc,
-		pruningManager:   pm,
-		migrationManager: mm,
-		telemetry:        m,
-		isMigrating:      mm != nil,
-	}, nil
-}
-
-// Close closes the store and resets all internal fields. Note, Close() is NOT
-// idempotent and should only be called once.
-func (s *Store) Close() (err error) {
-	err = errors.Join(err, s.stateStorage.Close())
-	err = errors.Join(err, s.stateCommitment.Close())
-
-	s.stateStorage = nil
-	s.stateCommitment = nil
-	s.lastCommitInfo = nil
-	s.commitHeader = nil
-
-	return err
-}
-
-func (s *Store) SetMetrics(m metrics.Metrics) {
-	s.telemetry = m
-}
-
-func (s *Store) SetInitialVersion(v uint64) error {
-	s.initialVersion = v
-
-	return s.stateCommitment.SetInitialVersion(v)
-}
-
-func (s *Store) StateLatest() (uint64, corestore.ReaderMap, error) {
-	v, err := s.GetLatestVersion()
-	if err != nil {
-		return 0, nil, err
-	}
-
-	return v, NewReaderMap(v, s), nil
-}
-
-func (s *Store) StateAt(v uint64) (corestore.ReaderMap, error) {
-	// TODO(bez): We may want to avoid relying on the SC metadata here. Instead,
-	// we should add a VersionExists() method to the VersionedDatabase interface.
-	//
-	// Ref: https://github.com/cosmos/cosmos-sdk/issues/19091
-	if cInfo, err := s.stateCommitment.GetCommitInfo(v); err != nil || cInfo == nil {
-		return nil, fmt.Errorf("failed to get commit info for version %d: %w", v, err)
-	}
-
-	return NewReaderMap(v, s), nil
-}
-
-func (s *Store) GetStateStorage() store.VersionedDatabase {
-	return s.stateStorage
-}
-
-func (s *Store) GetStateCommitment() store.Committer {
-	return s.stateCommitment
-}
-
-// LastCommitID returns a CommitID based off of the latest internal CommitInfo.
-// If an internal CommitInfo is not set, a new one will be returned with only the
-// latest version set, which is based off of the SC view.
-func (s *Store) LastCommitID() (proof.CommitID, error) {
-	if s.lastCommitInfo != nil {
-		return s.lastCommitInfo.CommitID(), nil
-	}
-
-	latestVersion, err := s.stateCommitment.GetLatestVersion()
-	if err != nil {
-		return proof.CommitID{}, err
-	}
-	// if no commit info has been set, return a CommitID with the latest version
-	// from the SC view and the hash of an empty byte slice
-	bz := sha256.Sum256([]byte{})
-
-	return proof.CommitID{Version: latestVersion, Hash: bz[:]}, nil
-}
-
-// GetLatestVersion returns the latest version based on the latest internal
-// CommitInfo. An error is returned if the latest CommitInfo or version cannot
-// be retrieved.
-func (s *Store) GetLatestVersion() (uint64, error) {
-	lastCommitID, err := s.LastCommitID()
-	if err != nil {
-		return 0, err
-	}
-
-	return lastCommitID.Version, nil
-}
-
-func (s *Store) Query(storeKey []byte, version uint64, key []byte, prove bool) (store.QueryResult, error) {
-	if s.telemetry != nil {
-		now := time.Now()
-		defer s.telemetry.MeasureSince(now, "root_store", "query")
-	}
-
-	var val []byte
-	var err error
-	if s.isMigrating { // if we're migrating, we need to query the SC backend
-		val, err = s.stateCommitment.Get(storeKey, version, key)
-		if err != nil {
-			return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", err)
-		}
-	} else {
-		val, err = s.stateStorage.Get(storeKey, version, key)
-		if err != nil {
-			return store.QueryResult{}, fmt.Errorf("failed to query SS store: %w", err)
-		}
-		if val == nil {
-			// fallback to querying SC backend if not found in SS backend
-			//
-			// Note, this should only be used during migration, i.e. while SS and IAVL v2
-			// are being asynchronously synced.
- bz, scErr := s.stateCommitment.Get(storeKey, version, key) - if scErr != nil { - return store.QueryResult{}, fmt.Errorf("failed to query SC store: %w", scErr) - } - val = bz - } - } - - result := store.QueryResult{ - Key: key, - Value: val, - Version: version, - } - - if prove { - result.ProofOps, err = s.stateCommitment.GetProof(storeKey, version, key) - if err != nil { - return store.QueryResult{}, fmt.Errorf("failed to get SC store proof: %w", err) - } - } - - return result, nil -} - -func (s *Store) LoadLatestVersion() error { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "load_latest_version") - } - - lv, err := s.GetLatestVersion() - if err != nil { - return err - } - - return s.loadVersion(lv) -} - -func (s *Store) LoadVersion(version uint64) error { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "load_version") - } - - return s.loadVersion(version) -} - -func (s *Store) loadVersion(v uint64) error { - s.logger.Debug("loading version", "version", v) - - if err := s.stateCommitment.LoadVersion(v); err != nil { - return fmt.Errorf("failed to load SC version %d: %w", v, err) - } - - s.commitHeader = nil - - // set lastCommitInfo explicitly s.t. Commit commits the correct version, i.e. v+1 - var err error - s.lastCommitInfo, err = s.stateCommitment.GetCommitInfo(v) - if err != nil { - return fmt.Errorf("failed to get commit info for version %d: %w", v, err) - } - - // if we're migrating, we need to start the migration process - if s.isMigrating { - s.startMigration() - } - - return nil -} - -func (s *Store) SetCommitHeader(h *coreheader.Info) { - s.commitHeader = h -} - -// WorkingHash writes the changeset to SC and SS and returns the workingHash -// of the CommitInfo. -func (s *Store) WorkingHash(cs *corestore.Changeset) ([]byte, error) { - if s.telemetry != nil { - now := time.Now() - defer s.telemetry.MeasureSince(now, "root_store", "working_hash") - } - - // write the changeset to the SC and SS backends - eg := new(errgroup.Group) - eg.Go(func() error { - if err := s.writeSC(cs); err != nil { - return fmt.Errorf("failed to write SC: %w", err) - } - - return nil - }) - eg.Go(func() error { - if err := s.stateStorage.ApplyChangeset(s.initialVersion, cs); err != nil { - return fmt.Errorf("failed to commit SS: %w", err) - } - - return nil - }) - if err := eg.Wait(); err != nil { - return nil, err - } - - workingHash := s.lastCommitInfo.Hash() - s.lastCommitInfo.Version -= 1 // reset lastCommitInfo to allow Commit() to work correctly - - return workingHash, nil -} - -// Commit commits all state changes to the underlying SS and SC backends. It -// writes a batch of the changeset to the SC tree, and retrieves the CommitInfo -// from the SC tree. Finally, it commits the SC tree and returns the hash of the -// CommitInfo. 
-func (s *Store) Commit(cs *corestore.Changeset) ([]byte, error) {
-	if s.telemetry != nil {
-		now := time.Now()
-		defer s.telemetry.MeasureSince(now, "root_store", "commit")
-	}
-
-	// write the changeset to the SC tree and update lastCommitInfo
-	if err := s.writeSC(cs); err != nil {
-		return nil, err
-	}
-
-	version := s.lastCommitInfo.Version
-
-	if s.commitHeader != nil && uint64(s.commitHeader.Height) != version {
-		s.logger.Debug("commit header and version mismatch", "header_height", s.commitHeader.Height, "version", version)
-	}
-
-	// signal to the pruning manager that a new version is about to be committed
-	// this may be required if the SS and SC backend implementations have a
-	// background pruning process that must be paused during the commit
-	if err := s.pruningManager.SignalCommit(true, version); err != nil {
-		s.logger.Error("failed to signal commit to pruning manager", "err", err)
-	}
-
-	eg := new(errgroup.Group)
-
-	// if we're migrating, we don't want to commit to the state storage to avoid
-	// parallel writes
-	if !s.isMigrating {
-		// commit SS async
-		eg.Go(func() error {
-			if err := s.stateStorage.ApplyChangeset(version, cs); err != nil {
-				return fmt.Errorf("failed to commit SS: %w", err)
-			}
-
-			return nil
-		})
-	}
-
-	// commit SC async
-	eg.Go(func() error {
-		if err := s.commitSC(); err != nil {
-			return fmt.Errorf("failed to commit SC: %w", err)
-		}
-
-		return nil
-	})
-
-	if err := eg.Wait(); err != nil {
-		return nil, err
-	}
-
-	// signal to the pruning manager that the commit is done
-	if err := s.pruningManager.SignalCommit(false, version); err != nil {
-		s.logger.Error("failed to signal commit done to pruning manager", "err", err)
-	}
-
-	if s.commitHeader != nil {
-		s.lastCommitInfo.Timestamp = s.commitHeader.Time
-	}
-
-	return s.lastCommitInfo.Hash(), nil
-}
-
-// startMigration starts a migration process to migrate the RootStore/v1 to the
-// SS and SC backends of store/v2 and initializes the channels.
-// It runs in a separate goroutine and replaces the current RootStore with the
-// migrated new backends once the migration is complete.
-//
-// NOTE: This method should only be called once after loadVersion.
-func (s *Store) startMigration() {
-	// buffer at most 1 changeset; if the receiver is behind, attempting to
-	// buffer more than 1 will block.
-	s.chChangeset = make(chan *migration.VersionedChangeset, 1)
-	// it is used to signal the migration manager that the migration is done
-	s.chDone = make(chan struct{})
-
-	mtx := sync.Mutex{}
-	mtx.Lock()
-	go func() {
-		version := s.lastCommitInfo.Version
-		s.logger.Info("starting migration", "version", version)
-		mtx.Unlock()
-		if err := s.migrationManager.Start(version, s.chChangeset, s.chDone); err != nil {
-			s.logger.Error("failed to start migration", "err", err)
-		}
-	}()
-
-	// wait for the migration manager to start
-	mtx.Lock()
-	defer mtx.Unlock()
-}
-
-// writeSC accepts a Changeset and writes that as a batch to the underlying SC
-// tree, which allows us to retrieve the working hash of the SC tree. Finally,
-// we construct a *CommitInfo and set that as lastCommitInfo. Note, this should
-// only be called once per block!
-// If migration is in progress, the changeset is sent to the migration manager.
-func (s *Store) writeSC(cs *corestore.Changeset) error {
-	if s.isMigrating {
-		// if the migration manager has already migrated to the version, close the
-		// channels and replace the state commitment
-		if s.migrationManager.GetMigratedVersion() == s.lastCommitInfo.Version {
-			close(s.chDone)
-			close(s.chChangeset)
-			s.isMigrating = false
-			// close the old state commitment and replace it with the new one
-			if err := s.stateCommitment.Close(); err != nil {
-				return fmt.Errorf("failed to close the old SC store: %w", err)
-			}
-			newStateCommitment := s.migrationManager.GetStateCommitment()
-			if newStateCommitment != nil {
-				s.stateCommitment = newStateCommitment
-			}
-			if err := s.migrationManager.Close(); err != nil {
-				return fmt.Errorf("failed to close migration manager: %w", err)
-			}
-			s.logger.Info("migration completed", "version", s.lastCommitInfo.Version)
-		} else {
-			s.chChangeset <- &migration.VersionedChangeset{Version: s.lastCommitInfo.Version + 1, Changeset: cs}
-		}
-	}
-
-	if err := s.stateCommitment.WriteChangeset(cs); err != nil {
-		return fmt.Errorf("failed to write batch to SC store: %w", err)
-	}
-
-	var previousHeight, version uint64
-	if s.lastCommitInfo.GetVersion() == 0 && s.initialVersion > 1 {
-		// This case means that no commit has been made in the store, so we
-		// start from initialVersion.
-		version = s.initialVersion
-	} else {
-		// This case can mean one of two things:
-		//
-		// 1. There was already a previous commit in the store, in which case we
-		//    increment the version from there.
-		// 2. There was no previous commit, and initial version was not set, in which
-		//    case we start at version 1.
-		previousHeight = s.lastCommitInfo.GetVersion()
-		version = previousHeight + 1
-	}
-
-	s.lastCommitInfo = s.stateCommitment.WorkingCommitInfo(version)
-
-	return nil
-}
-
-// commitSC commits the SC store. At this point, a batch of the current changeset
-// should have already been written to the SC via writeSC(). This method solely
-// commits that batch. An error is returned if commit fails or the hash of the
-// committed state does not match the hash of the working state.
-func (s *Store) commitSC() error { - cInfo, err := s.stateCommitment.Commit(s.lastCommitInfo.Version) - if err != nil { - return fmt.Errorf("failed to commit SC store: %w", err) - } - - if !bytes.Equal(cInfo.Hash(), s.lastCommitInfo.Hash()) { - return fmt.Errorf("unexpected commit hash; got: %X, expected: %X", cInfo.Hash(), s.lastCommitInfo.Hash()) - } - - return nil -} diff --git a/store/v2/root/store_test.go b/store/v2/root/store_test.go deleted file mode 100644 index b2b640feb9..0000000000 --- a/store/v2/root/store_test.go +++ /dev/null @@ -1,808 +0,0 @@ -package root - -import ( - "crypto/sha256" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/suite" - - coreheader "cosmossdk.io/core/header" - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/commitment" - "cosmossdk.io/store/v2/commitment/iavl" - dbm "cosmossdk.io/store/v2/db" - "cosmossdk.io/store/v2/proof" - "cosmossdk.io/store/v2/pruning" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/sqlite" -) - -const ( - testStoreKey = "test_store_key" - testStoreKey2 = "test_store_key2" - testStoreKey3 = "test_store_key3" -) - -var testStoreKeys = []string{testStoreKey, testStoreKey2, testStoreKey3} - -var ( - testStoreKeyBytes = []byte(testStoreKey) - testStoreKey2Bytes = []byte(testStoreKey2) - testStoreKey3Bytes = []byte(testStoreKey3) -) - -type RootStoreTestSuite struct { - suite.Suite - - rootStore store.RootStore -} - -func TestStorageTestSuite(t *testing.T) { - suite.Run(t, &RootStoreTestSuite{}) -} - -func (s *RootStoreTestSuite) SetupTest() { - noopLog := coretesting.NewNopLogger() - - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) - - tree := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - tree2 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - tree3 := iavl.NewIavlTree(dbm.NewMemDB(), noopLog, iavl.DefaultConfig()) - sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree, testStoreKey2: tree2, testStoreKey3: tree3}, dbm.NewMemDB(), noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, ss, nil, nil) - rs, err := New(noopLog, ss, sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) newStoreWithPruneConfig(config *store.PruningOption) { - noopLog := coretesting.NewNopLogger() - - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) - - mdb := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err := commitment.NewCommitStore(multiTrees, dbm.NewMemDB(), noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, ss, config, config) - - rs, err := New(noopLog, ss, sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) newStoreWithBackendMount(ss store.VersionedDatabase, sc store.Committer, pm *pruning.Manager) { - noopLog := coretesting.NewNopLogger() - - rs, err := New(noopLog, ss, sc, pm, nil, nil) - s.Require().NoError(err) - - s.rootStore = rs -} - -func (s *RootStoreTestSuite) TearDownTest() { - err := s.rootStore.Close() - s.Require().NoError(err) -} - -func (s *RootStoreTestSuite) 
TestGetStateCommitment() { - s.Require().Equal(s.rootStore.GetStateCommitment(), s.rootStore.(*Store).stateCommitment) -} - -func (s *RootStoreTestSuite) TestGetStateStorage() { - s.Require().Equal(s.rootStore.GetStateStorage(), s.rootStore.(*Store).stateStorage) -} - -func (s *RootStoreTestSuite) TestSetInitialVersion() { - initialVersion := uint64(5) - s.Require().NoError(s.rootStore.SetInitialVersion(initialVersion)) - - // perform the initial commit - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - - wHash, err := s.rootStore.WorkingHash(cs) - s.Require().NoError(err) - cHash, err := s.rootStore.Commit(corestore.NewChangeset()) - s.Require().NoError(err) - s.Require().Equal(wHash, cHash) - - // check the latest version - lVersion, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(initialVersion, lVersion) - - // set the initial version again - rInitialVersion := uint64(100) - s.Require().NoError(s.rootStore.SetInitialVersion(rInitialVersion)) - - // perform the commit - cs = corestore.NewChangeset() - cs.Add(testStoreKey2Bytes, []byte("foo"), []byte("bar"), false) - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - lVersion, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - // SetInitialVersion only works once - s.Require().NotEqual(rInitialVersion, lVersion) - s.Require().Equal(initialVersion+1, lVersion) -} - -func (s *RootStoreTestSuite) TestSetCommitHeader() { - h := &coreheader.Info{ - Height: 100, - Hash: []byte("foo"), - ChainID: "test", - } - s.rootStore.SetCommitHeader(h) - - s.Require().Equal(h, s.rootStore.(*Store).commitHeader) -} - -func (s *RootStoreTestSuite) TestQuery() { - _, err := s.rootStore.Query([]byte{}, 1, []byte("foo"), true) - s.Require().Error(err) - - // write and commit a changeset - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - - // ensure the proof is non-nil for the corresponding version - result, err := s.rootStore.Query([]byte(testStoreKey), 1, []byte("foo"), true) - s.Require().NoError(err) - s.Require().NotNil(result.ProofOps) - s.Require().Equal([]byte("foo"), result.ProofOps[0].Key) -} - -func (s *RootStoreTestSuite) TestGetFallback() { - sc := s.rootStore.GetStateCommitment() - - // create a changeset and commit it to SC ONLY - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("foo"), []byte("bar"), false) - - err := sc.WriteChangeset(cs) - s.Require().NoError(err) - - ci := sc.WorkingCommitInfo(1) - _, err = sc.Commit(ci.Version) - s.Require().NoError(err) - - // ensure we can query for the key, which should fallback to SC - qResult, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("foo"), false) - s.Require().NoError(err) - s.Require().Equal([]byte("bar"), qResult.Value) - - // non-existent key - qResult, err = s.rootStore.Query(testStoreKeyBytes, 1, []byte("non_existent_key"), false) - s.Require().NoError(err) - s.Require().Nil(qResult.Value) -} - -func (s *RootStoreTestSuite) TestQueryProof() { - cs := corestore.NewChangeset() - // testStoreKey - cs.Add(testStoreKeyBytes, []byte("key1"), []byte("value1"), false) - cs.Add(testStoreKeyBytes, []byte("key2"), []byte("value2"), false) - // testStoreKey2 - cs.Add(testStoreKey2Bytes, []byte("key3"), []byte("value3"), false) - // testStoreKey3 - cs.Add(testStoreKey3Bytes, []byte("key4"), []byte("value4"), false) - - 
// commit - _, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - - // query proof for testStoreKey - result, err := s.rootStore.Query(testStoreKeyBytes, 1, []byte("key1"), true) - s.Require().NoError(err) - s.Require().NotNil(result.ProofOps) - cInfo, err := s.rootStore.GetStateCommitment().GetCommitInfo(1) - s.Require().NoError(err) - storeHash := cInfo.GetStoreCommitID(testStoreKeyBytes).Hash - treeRoots, err := result.ProofOps[0].Run([][]byte{[]byte("value1")}) - s.Require().NoError(err) - s.Require().Equal(treeRoots[0], storeHash) - expRoots, err := result.ProofOps[1].Run([][]byte{storeHash}) - s.Require().NoError(err) - s.Require().Equal(expRoots[0], cInfo.Hash()) -} - -func (s *RootStoreTestSuite) TestLoadVersion() { - // write and commit a few changesets - for v := 1; v <= 5; v++ { - val := fmt.Sprintf("val%03d", v) // val001, val002, ..., val005 - - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - } - - // ensure the latest version is correct - latest, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), latest) - - // attempt to load a non-existent version - err = s.rootStore.LoadVersion(6) - s.Require().Error(err) - - // attempt to load a previously committed version - err = s.rootStore.LoadVersion(3) - s.Require().NoError(err) - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latest) - - // query state and ensure values returned are based on the loaded version - _, ro, err := s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err := reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("val003"), val) - - // attempt to write and commit a few changesets - for v := 4; v <= 5; v++ { - val := fmt.Sprintf("overwritten_val%03d", v) // overwritten_val004, overwritten_val005 - - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("key"), []byte(val), false) - - commitHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(commitHash) - } - - // ensure the latest version is correct - latest, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), latest) - - // query state and ensure values returned are based on the loaded version - _, ro, err = s.rootStore.StateLatest() - s.Require().NoError(err) - - reader, err = ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - val, err = reader.Get([]byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("overwritten_val005"), val) -} - -func (s *RootStoreTestSuite) TestCommit() { - lv, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Zero(lv) - - // perform changes - cs := corestore.NewChangeset() - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - // ensure latest version is updated - lv, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(1), lv) - - // perform reads on the updated root store - _, 
ro, err := s.rootStore.StateLatest() - s.Require().NoError(err) - - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - - s.Require().Equal([]byte(val), result) - } -} - -func (s *RootStoreTestSuite) TestStateAt() { - // write keys over multiple versions - for v := uint64(1); v <= 5; v++ { - // perform changes - cs := corestore.NewChangeset() - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - lv, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(5), lv) - - // ensure we can read state correctly at each version - for v := uint64(1); v <= 5; v++ { - ro, err := s.rootStore.StateAt(v) - s.Require().NoError(err) - - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), result) - } - } -} - -func (s *RootStoreTestSuite) TestWorkingHash() { - // write keys over multiple versions - for v := uint64(1); v <= 5; v++ { - // perform changes - cs := corestore.NewChangeset() - for _, storeKeyBytes := range [][]byte{testStoreKeyBytes, testStoreKey2Bytes, testStoreKey3Bytes} { - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key_%x_%03d", i, storeKeyBytes) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, v) // val000_1, val001_1, ..., val099_1 - - cs.Add(storeKeyBytes, []byte(key), []byte(val), false) - } - } - - wHash, err := s.rootStore.WorkingHash(cs) - s.Require().NoError(err) - // execute Commit with empty changeset - cHash, err := s.rootStore.Commit(corestore.NewChangeset()) - s.Require().NoError(err) - s.Require().Equal(wHash, cHash) - } -} - -func (s *RootStoreTestSuite) TestPrune() { - // perform changes - cs := corestore.NewChangeset() - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - } - - testCases := []struct { - name string - numVersions int64 - po store.PruningOption - deleted []uint64 - saved []uint64 - }{ - {"prune nothing", 10, store.PruningOption{ - KeepRecent: 0, - Interval: 0, - }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - {"prune everything", 12, store.PruningOption{ - KeepRecent: 1, - Interval: 10, - }, []uint64{1, 2, 3, 4, 5, 6, 7, 8}, []uint64{9, 10, 11, 12}}, - {"prune some; no batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 1, - }, []uint64{1, 2, 3, 4, 6, 5, 7}, []uint64{8, 9, 10}}, - {"prune some; small batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 3, - }, []uint64{1, 2, 3, 4, 5, 6}, []uint64{7, 8, 9, 10}}, - {"prune some; large batch", 10, store.PruningOption{ - KeepRecent: 2, - Interval: 11, - }, nil, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, - } - - 
for _, tc := range testCases { - tc := tc - - s.newStoreWithPruneConfig(&tc.po) - - // write keys over multiple versions - for i := int64(0); i < tc.numVersions; i++ { - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - for _, v := range tc.saved { - ro, err := s.rootStore.StateAt(v) - s.Require().NoError(err, "expected no error when loading height %d at test %s", v, tc.name) - - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(key)) - s.Require().NoError(err) - s.Require().Equal([]byte(val), result, "value should be equal for test: %s", tc.name) - } - } - - for _, v := range tc.deleted { - var err error - checkErr := func() bool { - if _, err = s.rootStore.StateAt(v); err != nil { - return true - } - return false - } - // wait for async pruning process to finish - s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond) - s.Require().Error(err, "expected error when loading height %d at test %s", v, tc.name) - } - } -} - -func (s *RootStoreTestSuite) TestMultiStore_Pruning_SameHeightsTwice() { - // perform changes - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - const ( - numVersions uint64 = 10 - keepRecent uint64 = 1 - interval uint64 = 10 - ) - - s.newStoreWithPruneConfig(&store.PruningOption{ - KeepRecent: keepRecent, - Interval: interval, - }) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - for i := uint64(0); i < numVersions; i++ { - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(numVersions, latestVer) - - for v := uint64(1); v < numVersions-keepRecent; v++ { - var err error - checkErr := func() bool { - if _, err = s.rootStore.StateAt(v); err != nil { - return true - } - return false - } - // wait for async pruning process to finish - s.Require().Eventually(checkErr, 2*time.Second, 100*time.Millisecond, "expected no error when loading height: %d", v) - } - - for v := (numVersions - keepRecent); v < numVersions; v++ { - _, err := s.rootStore.StateAt(v) - s.Require().NoError(err, "expected no error when loading height: %d", v) - } - - // Get latest - err = s.rootStore.LoadVersion(numVersions - 1) - s.Require().NoError(err) - - // Test pruning the same heights again - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) - - // Ensure that can commit one more height with no panic - _, err = s.rootStore.Commit(cs) - s.Require().NoError(err) -} - -func (s *RootStoreTestSuite) TestMultiStore_PruningRestart() { - // perform changes - cs := corestore.NewChangeset() - cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - pruneOpt := &store.PruningOption{ - KeepRecent: 2, - Interval: 11, - } - - noopLog := coretesting.NewNopLogger() - - mdb1 := dbm.NewMemDB() - mdb2 := dbm.NewMemDB() - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss := storage.NewStorageStore(sqliteDB, noopLog) - - tree := iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) - sc, err := commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, mdb2, noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, ss, pruneOpt, 
pruneOpt) - - s.newStoreWithBackendMount(ss, sc, pm) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - // Commit enough to build up heights to prune, where on the next block we should - // batch delete. - for i := uint64(0); i < 10; i++ { - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - } - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune := pruneOpt.ShouldPrune(latestVer) - s.Require().False(ok) - s.Require().Equal(uint64(0), actualHeightToPrune) - - // "restart" - sqliteDB, err = sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - ss = storage.NewStorageStore(sqliteDB, noopLog) - - tree = iavl.NewIavlTree(mdb1, noopLog, iavl.DefaultConfig()) - sc, err = commitment.NewCommitStore(map[string]commitment.Tree{testStoreKey: tree}, mdb2, noopLog) - s.Require().NoError(err) - - pm = pruning.NewManager(sc, ss, pruneOpt, pruneOpt) - - s.newStoreWithBackendMount(ss, sc, pm) - err = s.rootStore.LoadLatestVersion() - s.Require().NoError(err) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer) - s.Require().False(ok) - s.Require().Equal(uint64(0), actualHeightToPrune) - - // commit one more block and ensure the heights have been pruned - // execute Commit - cHash, err := s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - - ok, actualHeightToPrune = pruneOpt.ShouldPrune(latestVer) - s.Require().True(ok) - s.Require().Equal(uint64(8), actualHeightToPrune) - - for v := uint64(1); v <= actualHeightToPrune; v++ { - checkErr := func() bool { - if err = s.rootStore.LoadVersion(v); err != nil { - return true - } - return false - } - // wait for async pruning process to finish - s.Require().Eventually(checkErr, 5*time.Second, 100*time.Millisecond, "expected error when loading height: %d", v) - } -} - -func (s *RootStoreTestSuite) TestMultiStoreRestart() { - noopLog := coretesting.NewNopLogger() - - sqliteDB, err := sqlite.New(s.T().TempDir()) - s.Require().NoError(err) - - ss := storage.NewStorageStore(sqliteDB, noopLog) - - mdb1 := dbm.NewMemDB() - mdb2 := dbm.NewMemDB() - multiTrees := make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err := commitment.NewCommitStore(multiTrees, mdb2, noopLog) - s.Require().NoError(err) - - pm := pruning.NewManager(sc, ss, nil, nil) - - s.newStoreWithBackendMount(ss, sc, pm) - s.Require().NoError(s.rootStore.LoadLatestVersion()) - - // perform changes - for i := 1; i < 3; i++ { - cs := corestore.NewChangeset() - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", i, 1) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - - key = fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", i, 2) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKey2Bytes, []byte(key), []byte(val), false) - - key = fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", i, 3) // val000_1, val001_1, ..., val099_1 - - cs.Add(testStoreKey3Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err := 
s.rootStore.Commit(cs) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(i), latestVer) - } - - // more changes - cs1 := corestore.NewChangeset() - key := fmt.Sprintf("key%03d", 3) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d_%03d", 3, 1) // val000_1, val001_1, ..., val099_1 - - cs1.Add(testStoreKeyBytes, []byte(key), []byte(val), false) - - key = fmt.Sprintf("key%03d", 3) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", 3, 2) // val000_1, val001_1, ..., val099_1 - - cs1.Add(testStoreKey2Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err := s.rootStore.Commit(cs1) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err := s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(3), latestVer) - - cs2 := corestore.NewChangeset() - key = fmt.Sprintf("key%03d", 4) // key000, key001, ..., key099 - val = fmt.Sprintf("val%03d_%03d", 4, 3) // val000_1, val001_1, ..., val099_1 - - cs2.Add(testStoreKey3Bytes, []byte(key), []byte(val), false) - - // execute Commit - cHash, err = s.rootStore.Commit(cs2) - s.Require().NoError(err) - s.Require().NotNil(cHash) - - latestVer, err = s.rootStore.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(4), latestVer) - - _, ro1, err := s.rootStore.StateLatest() - s.Require().Nil(err) - reader1, err := ro1.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result1, err := reader1.Get([]byte(fmt.Sprintf("key%03d", 3))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result1, "value should be equal") - - // "restart" - multiTrees = make(map[string]commitment.Tree) - for _, storeKey := range testStoreKeys { - prefixDB := dbm.NewPrefixDB(mdb1, []byte(storeKey)) - multiTrees[storeKey] = iavl.NewIavlTree(prefixDB, noopLog, iavl.DefaultConfig()) - } - - sc, err = commitment.NewCommitStore(multiTrees, mdb2, noopLog) - s.Require().NoError(err) - - pm = pruning.NewManager(sc, ss, nil, nil) - - s.newStoreWithBackendMount(ss, sc, pm) - err = s.rootStore.LoadLatestVersion() - s.Require().Nil(err) - - latestVer, ro, err := s.rootStore.StateLatest() - s.Require().Nil(err) - s.Require().Equal(uint64(4), latestVer) - reader, err := ro.GetReader(testStoreKeyBytes) - s.Require().NoError(err) - result, err := reader.Get([]byte(fmt.Sprintf("key%03d", 3))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 3, 1)), result, "value should be equal") - - reader, err = ro.GetReader(testStoreKey2Bytes) - s.Require().NoError(err) - result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 2))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 2, 2)), result, "value should be equal") - - reader, err = ro.GetReader(testStoreKey3Bytes) - s.Require().NoError(err) - result, err = reader.Get([]byte(fmt.Sprintf("key%03d", 4))) - s.Require().NoError(err) - s.Require().Equal([]byte(fmt.Sprintf("val%03d_%03d", 4, 3)), result, "value should be equal") -} - -func (s *RootStoreTestSuite) TestHashStableWithEmptyCommitAndRestart() { - err := s.rootStore.LoadLatestVersion() - s.Require().Nil(err) - - emptyHash := sha256.Sum256([]byte{}) - appHash := emptyHash[:] - commitID := proof.CommitID{Hash: appHash} - lastCommitID, err := s.rootStore.LastCommitID() - s.Require().Nil(err) - - s.Require().Equal(commitID, lastCommitID) - - cs := corestore.NewChangeset() - 
cs.Add(testStoreKeyBytes, []byte("key"), []byte("val"), false) - - cHash, err := s.rootStore.Commit(cs) - s.Require().Nil(err) - s.Require().NotNil(cHash) - latestVersion, err := s.rootStore.GetLatestVersion() - hash := cHash - s.Require().Nil(err) - s.Require().Equal(uint64(1), latestVersion) - - // make an empty commit, it should update version, but not affect hash - cHash, err = s.rootStore.Commit(corestore.NewChangeset()) - s.Require().Nil(err) - s.Require().NotNil(cHash) - latestVersion, err = s.rootStore.GetLatestVersion() - s.Require().Nil(err) - s.Require().Equal(uint64(2), latestVersion) - s.Require().Equal(hash, cHash) - - // reload the store - s.Require().NoError(s.rootStore.LoadLatestVersion()) - lastCommitID, err = s.rootStore.LastCommitID() - s.Require().NoError(err) - s.Require().Equal(lastCommitID.Hash, hash) -} diff --git a/store/v2/snapshots/README.md b/store/v2/snapshots/README.md deleted file mode 100644 index cffb5e0ee1..0000000000 --- a/store/v2/snapshots/README.md +++ /dev/null @@ -1,284 +0,0 @@ -# State Sync Snapshotting - -The `snapshots` package implements automatic support for CometBFT state sync -in Cosmos SDK-based applications. State sync allows a new node joining a network -to simply fetch a recent snapshot of the application state instead of fetching -and applying all historical blocks. This can reduce the time needed to join the -network by several orders of magnitude (e.g. weeks to minutes), but the node -will not contain historical data from previous heights. - -This document describes the Cosmos SDK implementation of the ABCI state sync -interface, for more information on CometBFT state sync in general see: - -* [CometBFT State Sync for Developers](https://medium.com/cometbft/cometbft-core-state-sync-for-developers-70a96ba3ee35) -* [ABCI State Sync](https://docs.cometbft.com/v1.0/explanation/core/state-sync) -* [ABCI State Sync Methods](https://docs.cometbft.com/v1.0/spec/abci/abci++_basic_concepts#state-sync-methods) - -## Overview - -For an overview of how Cosmos SDK state sync is set up and configured by -developers and end-users, see the -[Cosmos SDK State Sync Guide](https://blog.cosmos.network/cosmos-sdk-state-sync-guide-99e4cf43be2f). - -Briefly, the Cosmos SDK takes state snapshots at regular height intervals given -by `state-sync.snapshot-interval` and stores them as binary files in the -filesystem under `/data/snapshots/`, with metadata in a LevelDB database -`/data/snapshots/metadata.db`. The number of recent snapshots to keep are given by -`state-sync.snapshot-keep-recent`. - -Snapshots are taken asynchronously, i.e. new blocks will be applied concurrently -with snapshots being taken. This is possible because IAVL supports querying -immutable historical heights. However, this requires heights that are multiples of `state-sync.snapshot-interval` -to be kept until after the snapshot is complete. It is done to prevent a height from being removed -while it is being snapshotted. - -When a remote node is state syncing, CometBFT calls the ABCI method -`ListSnapshots` to list available local snapshots and `LoadSnapshotChunk` to -load a binary snapshot chunk. When the local node is being state synced, -CometBFT calls `OfferSnapshot` to offer a discovered remote snapshot to the -local application and `ApplySnapshotChunk` to apply a binary snapshot chunk to -the local application. See the resources linked above for more details on these -methods and how CometBFT performs state sync. 
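
The split between the serving and restoring sides can be made concrete with a small sketch. Everything here (`snapshot`, `snapshotStore`, and their methods) is a hypothetical stand-in for illustration only; the real manager serves these ABCI calls from chunk files on disk with metadata in LevelDB.

```go
package main

import "fmt"

// snapshot is a hypothetical in-memory stand-in for a stored snapshot:
// its metadata plus the binary chunks CometBFT will ship to peers.
type snapshot struct {
	Height uint64
	Format uint32
	Chunks [][]byte
}

type snapshotStore struct{ snapshots []snapshot }

// listSnapshots models the serving side of ABCI ListSnapshots: report the
// metadata of every locally available snapshot.
func (s *snapshotStore) listSnapshots() []snapshot { return s.snapshots }

// loadSnapshotChunk models the serving side of ABCI LoadSnapshotChunk:
// return the raw bytes of a single chunk identified by height/format/index.
func (s *snapshotStore) loadSnapshotChunk(height uint64, format, chunk uint32) ([]byte, error) {
	for _, snap := range s.snapshots {
		if snap.Height == height && snap.Format == format && int(chunk) < len(snap.Chunks) {
			return snap.Chunks[chunk], nil
		}
	}
	return nil, fmt.Errorf("chunk %d of snapshot height=%d format=%d not found", chunk, height, format)
}

func main() {
	store := &snapshotStore{snapshots: []snapshot{
		{Height: 900, Format: 1, Chunks: [][]byte{[]byte("chunk0"), []byte("chunk1")}},
	}}
	for _, snap := range store.listSnapshots() {
		fmt.Printf("snapshot height=%d format=%d chunks=%d\n", snap.Height, snap.Format, len(snap.Chunks))
	}
	chunk, err := store.loadSnapshotChunk(900, 1, 1)
	fmt.Printf("chunk=%q err=%v\n", chunk, err)
}
```

On the restoring side, `OfferSnapshot` and `ApplySnapshotChunk` are the mirror images: the application accepts or rejects the offered metadata, then receives the chunks in order.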
- -The Cosmos SDK does not currently do any incremental verification of snapshots -during restoration, i.e. only after the entire snapshot has been restored will -CometBFT compare the app hash against the trusted hash from the chain. Cosmos -SDK snapshots and chunks do contain hashes as checksums to guard against IO -corruption and non-determinism, but these are not tied to the chain state and -can be trivially forged by an adversary. This was considered out of scope for -the initial implementation, but can be added later without changes to the -ABCI state sync protocol. - -## Relationship to Pruning - -Snapshot settings are optional. However, if set, they have an effect on how pruning is done by -persisting the heights that are multiples of `state-sync.snapshot-interval` until after the snapshot is complete. - -If pruning is enabled (not `pruning = "nothing"`), we avoid pruning heights that are multiples of -`state-sync.snapshot-interval` in the regular logic determined by the -pruning settings and applied after every `Commit()`. This is done to prevent a -height from being removed before a snapshot is complete. Therefore, we keep -such heights until after a snapshot is done. At this point, the height is sent to -the `pruning.Manager` to be pruned according to the pruning settings after the next `Commit()`. - -To illustrate, assume that we are currently at height 960 with `pruning-keep-recent = 50`, -`pruning-interval = 10`, and `state-sync.snapshot-interval = 100`. Let's assume that -the snapshot that was triggered at height `900` **just finishes**. Then, we can prune height -`900` right away (that is, when we call `Commit()` at height 960, because 900 is less than `960 - 50 = 910`). - -Let's now assume that all conditions stay the same but the snapshot at height 900 is **not complete yet**. -Then, we cannot prune it, so as to avoid deleting a height that is still being snapshotted. Therefore, we keep track -of this height until the snapshot is complete. The height 900 will be pruned at the first height h that satisfies the following conditions: - -* the snapshot is complete -* h is a multiple of `pruning-interval` -* the snapshot height is less than h - `pruning-keep-recent` - -Note that in both examples, if we let current height = C, and previous height P = C - 1, then for every height h that is: - -P - `pruning-keep-recent` - `pruning-interval` <= h <= P - `pruning-keep-recent` - -we can prune height h. In our first example, all heights 899 - 909 fall in this range and are pruned at height 960 as long as -h is not a snapshot height (e.g. 900). - -That is, we always use the current height to determine at which height to prune (960), while we use the previous height -to determine which heights are to be pruned (heights `959 - 50 - 10 = 899` through `959 - 50 = 909`). A minimal Go sketch -of this window follows the configuration list below. - -## Configuration - -* `state-sync.snapshot-interval` - * the interval at which to take snapshots. - * the value of 0 disables snapshots. - * if pruning is enabled, it is done after a snapshot is complete for the heights that are multiples of this interval. - -* `state-sync.snapshot-keep-recent` - * the number of recent snapshots to keep. - * 0 means keep all.
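-
-As promised above, here is a minimal Go sketch of the pruning window. It is a
-standalone illustration of the arithmetic only; `prunable` is a hypothetical
-helper, not an SDK function, and it assumes the current height is comfortably
-larger than `keep-recent + interval`.
-
-```go
-package main
-
-import "fmt"
-
-// prunable returns the heights that may be pruned when committing at
-// currentHeight, following the window derived above:
-// P - keepRecent - interval <= h <= P - keepRecent, with P = currentHeight - 1.
-// Heights whose snapshots are still in progress are skipped.
-func prunable(currentHeight, keepRecent, interval uint64, snapshotting map[uint64]bool) []uint64 {
-	p := currentHeight - 1
-	lo, hi := p-keepRecent-interval, p-keepRecent
-	var heights []uint64
-	for h := lo; h <= hi; h++ {
-		if !snapshotting[h] {
-			heights = append(heights, h)
-		}
-	}
-	return heights
-}
-
-func main() {
-	// Committing at height 960 with pruning-keep-recent = 50 and
-	// pruning-interval = 10 while the height-900 snapshot is still running:
-	// heights 899-909 are prunable, except 900.
-	fmt.Println(prunable(960, 50, 10, map[uint64]bool{900: true}))
-}
-```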
- -## Snapshot Metadata - -The ABCI Protobuf type for a snapshot is listed below (refer to the ABCI spec -for field details): - -```protobuf -message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // Number of chunks in the snapshot - bytes hash = 4; // Arbitrary snapshot hash, equal only if identical - bytes metadata = 5; // Arbitrary application metadata -} -``` - -Because the `metadata` field is application-specific, the Cosmos SDK uses a -similar type `cosmos.base.snapshots.v1beta1.Snapshot` with its own metadata -representation: - -```protobuf -// Snapshot contains CometBFT state sync snapshot info. -message Snapshot { - uint64 height = 1; - uint32 format = 2; - uint32 chunks = 3; - bytes hash = 4; - Metadata metadata = 5 [(gogoproto.nullable) = false]; -} - -// Metadata contains SDK-specific snapshot metadata. -message Metadata { - repeated bytes chunk_hashes = 1; // SHA-256 chunk hashes -} -``` - -The `format` is currently `1`, defined in `snapshots.types.CurrentFormat`. This -must be increased whenever the binary snapshot format changes, and it may be -useful to support past formats in newer versions. - -The `hash` is a SHA-256 hash of the entire binary snapshot, used to guard -against IO corruption and non-determinism across nodes. Note that this is not -tied to the chain state, and can be trivially forged (but CometBFT will always -compare the final app hash against the chain app hash). Similarly, the -`chunk_hashes` are SHA-256 checksums of each binary chunk. - -The `metadata` field is Protobuf-serialized before it is placed into the ABCI -snapshot. - -## Snapshot Format - -The current version `1` snapshot format is a zlib-compressed, length-prefixed -Protobuf stream of `cosmos.base.store.v1beta1.SnapshotItem` messages, split into -chunks at exact 10 MB byte boundaries. - -```protobuf -// SnapshotItem is an item contained in a rootmulti.Store snapshot. -message SnapshotItem { - // item is the specific type of snapshot item. - oneof item { - SnapshotStoreItem store = 1; - SnapshotIAVLItem iavl = 2 [(gogoproto.customname) = "IAVL"]; - } -} - -// SnapshotStoreItem contains metadata about a snapshotted store. -message SnapshotStoreItem { - string name = 1; -} - -// SnapshotIAVLItem is an exported IAVL node. -message SnapshotIAVLItem { - bytes key = 1; - bytes value = 2; - int64 version = 3; - int32 height = 4; -} -``` - -Snapshots are generated by `rootmulti.Store.Snapshot()` as follows: - -1. Set up a `protoio.NewDelimitedWriter` that writes length-prefixed serialized - `SnapshotItem` Protobuf messages. - 1. Iterate over each IAVL store in lexicographical order by store name. - 2. Emit a `SnapshotStoreItem` containing the store name. - 3. Start an IAVL export for the store using - [`iavl.ImmutableTree.Export()`](https://pkg.go.dev/github.com/cosmos/iavl#ImmutableTree.Export). - 4. Iterate over each IAVL node. - 5. Emit a `SnapshotIAVLItem` for the IAVL node. -2. Pass the serialized Protobuf output stream to a zlib compression writer. -3. Split the zlib output stream into chunks at exactly every 10th megabyte. - -Snapshots are restored via `rootmulti.Store.Restore()` as the inverse of the above, using -[`iavl.MutableTree.Import()`](https://pkg.go.dev/github.com/cosmos/iavl#MutableTree.Import) -to reconstruct each IAVL tree. 
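-
-The write pipeline in the generation steps above can be sketched with the
-standard library alone. This is a simplified illustration, not the real
-implementation: the actual code uses gogoproto's `protoio.NewDelimitedWriter`
-and the package's `ChunkWriter`, the payloads below stand in for serialized
-`SnapshotItem` messages, and the 8-byte chunk size replaces the real 10 MB
-boundary so the split is visible.
-
-```go
-package main
-
-import (
-	"bytes"
-	"compress/zlib"
-	"encoding/binary"
-	"fmt"
-)
-
-// writeDelimited emulates one entry of a length-prefixed protobuf stream:
-// a uvarint length followed by the message bytes.
-func writeDelimited(buf *bytes.Buffer, msg []byte) {
-	var lenBuf [binary.MaxVarintLen64]byte
-	n := binary.PutUvarint(lenBuf[:], uint64(len(msg)))
-	buf.Write(lenBuf[:n])
-	buf.Write(msg)
-}
-
-func main() {
-	// Steps 1-2: length-prefixed items pushed through a zlib compressor.
-	var stream bytes.Buffer
-	writeDelimited(&stream, []byte("store item"))
-	writeDelimited(&stream, []byte("iavl item"))
-
-	var compressed bytes.Buffer
-	zw := zlib.NewWriter(&compressed)
-	zw.Write(stream.Bytes())
-	zw.Close()
-
-	// Step 3: split the compressed stream at fixed byte boundaries.
-	const chunkSize = 8
-	data := compressed.Bytes()
-	for len(data) > 0 {
-		n := chunkSize
-		if n > len(data) {
-			n = len(data)
-		}
-		fmt.Printf("chunk of %d bytes\n", n)
-		data = data[n:]
-	}
-}
-```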
- -## Snapshot Storage - -Snapshot storage is managed by `snapshots.Store`, with metadata in a `db.DB` -database and binary chunks in the filesystem. Note that this is only used to -store locally taken snapshots that are being offered to other nodes. When the -local node is being state synced, CometBFT will take care of buffering and -storing incoming snapshot chunks before they are applied to the application. - -Metadata is generally stored in a LevelDB database at -`<node_home>/data/snapshots/metadata.db`. It contains serialized -`cosmos.base.snapshots.v1beta1.Snapshot` Protobuf messages with a key given by -the concatenation of a key prefix, the big-endian height, and the big-endian -format. Chunk data is stored as regular files under -`<node_home>/data/snapshots/<height>/<format>/<chunk>`. - -The `snapshots.Store` API is based on streaming IO, and integrates easily with -the `snapshots.types.Snapshotter` snapshot/restore interface implemented by -`rootmulti.Store`. The `Store.Save()` method stores a snapshot given as a -`<-chan io.ReadCloser` channel of binary chunk streams, and `Store.Load()` loads -the snapshot as a channel of binary chunk streams -- the same stream types used -by `Snapshotter.Snapshot()` and `Snapshotter.Restore()` to take and restore -snapshots using streaming IO. - -The store also provides many other methods such as `List()` to list stored -snapshots, `LoadChunk()` to load a single snapshot chunk, and `Prune()` to prune -old snapshots. - -## Taking Snapshots - -`snapshots.Manager` is a high-level snapshot manager that integrates a -`snapshots.types.Snapshotter` (i.e. the `rootmulti.Store` snapshot -functionality) and a `snapshots.Store`, providing an API that maps easily onto -the ABCI state sync API. The `Manager` will also make sure only one operation -is in progress at a time, e.g. to prevent multiple snapshots being taken -concurrently. - -During `BaseApp.Commit`, once a state transition has been committed, the height -is checked against the `state-sync.snapshot-interval` setting. If the committed -height should be snapshotted, a goroutine `BaseApp.snapshot()` is spawned that -calls `snapshots.Manager.Create()` to create the snapshot. Once a snapshot is -complete and if pruning is enabled, the snapshot height is pruned away by the manager -with the call `PruneSnapshotHeight(...)` to the `snapshots.types.Snapshotter`. - -`Manager.Create()` will do some basic pre-flight checks, and then start -generating a snapshot by calling `rootmulti.Store.Snapshot()`. The chunk stream -is passed into `snapshots.Store.Save()`, which stores the chunks in the -filesystem and records the snapshot metadata in the snapshot database. - -Once the snapshot has been generated, `BaseApp.snapshot()` removes any -old snapshots based on the `state-sync.snapshot-keep-recent` setting. - -## Serving Snapshots - -When a remote node is discovering snapshots for state sync, CometBFT will -call the `ListSnapshots` ABCI method to list the snapshots present on the -local node. This is dispatched to `snapshots.Manager.List()`, which in turn -dispatches to `snapshots.Store.List()`. - -When a remote node is fetching snapshot chunks during state sync, CometBFT -will call the `LoadSnapshotChunk` ABCI method to fetch a chunk from the local -node. This dispatches to `snapshots.Manager.LoadChunk()`, which in turn -dispatches to `snapshots.Store.LoadChunk()`.
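-
-Returning to the metadata layout described under Snapshot Storage, the key
-encoding is easy to reproduce. In this standalone sketch the `snapshotKey`
-helper is illustrative rather than the package's actual function, though the
-`0x01` prefix matches `keyPrefixSnapshot` in the store code below; big-endian
-encoding makes keys sort by height, then format.
-
-```go
-package main
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-// snapshotKey concatenates a one-byte prefix, the big-endian height, and the
-// big-endian format, as described above.
-func snapshotKey(height uint64, format uint32) []byte {
-	key := make([]byte, 1+8+4)
-	key[0] = 0x01 // keyPrefixSnapshot
-	binary.BigEndian.PutUint64(key[1:9], height)
-	binary.BigEndian.PutUint32(key[9:13], format)
-	return key
-}
-
-func main() {
-	fmt.Printf("%x\n", snapshotKey(900, 1)) // 01000000000000038400000001
-}
-```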
- -## Restoring Snapshots - -When the operator has configured the local CometBFT node to run state sync -(see the resources listed in the introduction for details on CometBFT state -sync), it will discover snapshots across the P2P network and offer their -metadata in turn to the local application via the `OfferSnapshot` ABCI call. - -`BaseApp.OfferSnapshot()` attempts to start a restore operation by calling -`snapshots.Manager.Restore()`. This may fail, e.g. if the snapshot format is -unknown (it may have been generated by a different version of the Cosmos SDK), -in which case CometBFT will offer other discovered snapshots. - -If the snapshot is accepted, `Manager.Restore()` will record that a restore -operation is in progress, and spawn a separate goroutine that runs a synchronous -`rootmulti.Store.Restore()` snapshot restoration which will be fed snapshot -chunks until it is complete. - -CometBFT will then start fetching and buffering chunks, providing them in -order via ABCI `ApplySnapshotChunk` calls. These dispatch to -`Manager.RestoreChunk()`, which passes the chunks to the ongoing restore -process, checking if errors have been encountered yet (e.g. due to checksum -mismatches or invalid IAVL data). Once the final chunk is passed, -`Manager.RestoreChunk()` will wait for the restore process to complete before -returning. - -Once the restore is completed, CometBFT will go on to call the `Info` ABCI -call to fetch the app hash, and compare this against the trusted chain app -hash at the snapshot height to verify the restored state. If it matches, -CometBFT goes on to process blocks. diff --git a/store/v2/snapshots/chunk.go b/store/v2/snapshots/chunk.go deleted file mode 100644 index 9049460a18..0000000000 --- a/store/v2/snapshots/chunk.go +++ /dev/null @@ -1,186 +0,0 @@ -package snapshots - -import ( - stderrors "errors" - "io" - "math" - - "cosmossdk.io/errors" - storeerrors "cosmossdk.io/store/v2/errors" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -// ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a -// sequence of io.ReadClosers via a channel. -type ChunkWriter struct { - ch chan<- io.ReadCloser - pipe *io.PipeWriter - chunkSize uint64 - written uint64 - closed bool -} - -// NewChunkWriter creates a new ChunkWriter. If chunkSize is 0, no chunking will be done. -func NewChunkWriter(ch chan<- io.ReadCloser, chunkSize uint64) *ChunkWriter { - return &ChunkWriter{ - ch: ch, - chunkSize: chunkSize, - } -} - -// chunk creates a new chunk. -func (w *ChunkWriter) chunk() error { - if w.pipe != nil { - err := w.pipe.Close() - if err != nil { - return err - } - } - pr, pw := io.Pipe() - w.ch <- pr - w.pipe = pw - w.written = 0 - return nil -} - -// Close implements io.Closer. -func (w *ChunkWriter) Close() error { - if !w.closed { - w.closed = true - close(w.ch) - var err error - if w.pipe != nil { - err = w.pipe.Close() - } - return err - } - return nil -} - -// CloseWithError closes the writer and sends an error to the reader. -func (w *ChunkWriter) CloseWithError(err error) { - if !w.closed { - if w.pipe == nil { - // create a dummy pipe just to propagate the error to the reader, it always returns nil - _ = w.chunk() - } - w.closed = true - close(w.ch) - _ = w.pipe.CloseWithError(err) // CloseWithError always returns nil - } -} - -// Write implements io.Writer. 
-func (w *ChunkWriter) Write(data []byte) (int, error) { - if w.closed { - return 0, errors.Wrap(storeerrors.ErrLogic, "cannot write to closed ChunkWriter") - } - nTotal := 0 - for len(data) > 0 { - if w.pipe == nil || (w.written >= w.chunkSize && w.chunkSize > 0) { - err := w.chunk() - if err != nil { - return nTotal, err - } - } - - var writeSize uint64 - if w.chunkSize == 0 { - writeSize = uint64(len(data)) - } else { - writeSize = w.chunkSize - w.written - } - if writeSize > uint64(len(data)) { - writeSize = uint64(len(data)) - } - - n, err := w.pipe.Write(data[:writeSize]) - w.written += uint64(n) - nTotal += n - if err != nil { - return nTotal, err - } - data = data[writeSize:] - } - return nTotal, nil -} - -// ChunkReader reads chunks from a channel of io.ReadClosers and outputs them as an io.Reader -type ChunkReader struct { - ch <-chan io.ReadCloser - reader io.ReadCloser -} - -// NewChunkReader creates a new ChunkReader. -func NewChunkReader(ch <-chan io.ReadCloser) *ChunkReader { - return &ChunkReader{ch: ch} -} - -// next fetches the next chunk from the channel, or returns io.EOF if there are no more chunks. -func (r *ChunkReader) next() error { - reader, ok := <-r.ch - if !ok { - return io.EOF - } - r.reader = reader - return nil -} - -// Close implements io.ReadCloser. -func (r *ChunkReader) Close() error { - var err error - if r.reader != nil { - err = r.reader.Close() - r.reader = nil - } - for reader := range r.ch { - if e := reader.Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// Read implements io.Reader. -func (r *ChunkReader) Read(p []byte) (int, error) { - if r.reader == nil { - err := r.next() - if err != nil { - return 0, err - } - } - n, err := r.reader.Read(p) - if stderrors.Is(err, io.EOF) { - err = r.reader.Close() - r.reader = nil - if err != nil { - return 0, err - } - return r.Read(p) - } - return n, err -} - -// DrainChunks drains and closes all remaining chunks from a chunk channel. 
-func DrainChunks(chunks <-chan io.ReadCloser) { - for chunk := range chunks { - _ = chunk.Close() - } -} - -// ValidRestoreHeight will check height is valid for snapshot restore or not -func ValidRestoreHeight(format uint32, height uint64) error { - if format != snapshotstypes.CurrentFormat { - return errors.Wrapf(snapshotstypes.ErrUnknownFormat, "format %v", format) - } - - if height == 0 { - return errors.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0") - } - if height > uint64(math.MaxInt64) { - return errors.Wrapf(snapshotstypes.ErrInvalidMetadata, - "snapshot height %v cannot exceed %v", height, int64(math.MaxInt64)) - } - - return nil -} diff --git a/store/v2/snapshots/chunk_test.go b/store/v2/snapshots/chunk_test.go deleted file mode 100644 index 2cf00eef27..0000000000 --- a/store/v2/snapshots/chunk_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package snapshots_test - -import ( - "bytes" - "errors" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/v2/snapshots" -) - -func TestChunkWriter(t *testing.T) { - ch := make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 2) - - n, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - n, err = chunkWriter.Write([]byte{4, 5, 6}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - n, err = chunkWriter.Write([]byte{7, 8, 9}) - require.NoError(t, err) - assert.Equal(t, 3, n) - - err = chunkWriter.Close() - require.NoError(t, err) - - // closed writer should error - _, err = chunkWriter.Write([]byte{10}) - require.Error(t, err) - - // closing again should be fine - err = chunkWriter.Close() - require.NoError(t, err) - }() - - assert.Equal(t, [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9}}, readChunks(ch)) - - // 0-sized chunks should return the whole body as one chunk - ch = make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 0) - _, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - _, err = chunkWriter.Write([]byte{4, 5, 6}) - require.NoError(t, err) - err = chunkWriter.Close() - require.NoError(t, err) - }() - assert.Equal(t, [][]byte{{1, 2, 3, 4, 5, 6}}, readChunks(ch)) - - // closing with error should return the error - theErr := errors.New("boom") - ch = make(chan io.ReadCloser, 100) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, 2) - _, err := chunkWriter.Write([]byte{1, 2, 3}) - require.NoError(t, err) - chunkWriter.CloseWithError(theErr) - }() - chunk, err := io.ReadAll(<-ch) - require.NoError(t, err) - assert.Equal(t, []byte{1, 2}, chunk) - _, err = io.ReadAll(<-ch) - require.Error(t, err) - assert.Equal(t, theErr, err) - assert.Empty(t, ch) - - // closing immediately should return no chunks - ch = make(chan io.ReadCloser, 100) - chunkWriter := snapshots.NewChunkWriter(ch, 2) - err = chunkWriter.Close() - require.NoError(t, err) - assert.Empty(t, ch) -} - -func TestChunkReader(t *testing.T) { - ch := makeChunks([][]byte{ - {1, 2, 3}, - {4}, - {}, - {5, 6}, - }) - chunkReader := snapshots.NewChunkReader(ch) - - buf := []byte{0, 0, 0, 0} - n, err := chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, 3, n) - assert.Equal(t, []byte{1, 2, 3, 0}, buf) - - buf = []byte{0, 0, 0, 0} - n, err = chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, 1, n) - assert.Equal(t, []byte{4, 0, 0, 0}, buf) - - buf = []byte{0, 0, 0, 0} - n, err = chunkReader.Read(buf) - require.NoError(t, err) - 
assert.Equal(t, 2, n) - assert.Equal(t, []byte{5, 6, 0, 0}, buf) - - buf = []byte{0, 0, 0, 0} - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, io.EOF, err) - - err = chunkReader.Close() - require.NoError(t, err) - - err = chunkReader.Close() // closing twice should be fine - require.NoError(t, err) - - // Empty channel should be fine - ch = makeChunks(nil) - chunkReader = snapshots.NewChunkReader(ch) - buf = make([]byte, 4) - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, io.EOF, err) - - // Using a pipe that closes with an error should return the error - theErr := errors.New("boom") - pr, pw := io.Pipe() - pch := make(chan io.ReadCloser, 1) - pch <- pr - _ = pw.CloseWithError(theErr) - - chunkReader = snapshots.NewChunkReader(pch) - buf = make([]byte, 4) - _, err = chunkReader.Read(buf) - require.Error(t, err) - assert.Equal(t, theErr, err) - - // Closing the reader should close the writer - pr, pw = io.Pipe() - pch = make(chan io.ReadCloser, 2) - pch <- io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3})) - pch <- pr - close(pch) - - go func() { - chunkReader := snapshots.NewChunkReader(pch) - buf := []byte{0, 0, 0, 0} - _, err := chunkReader.Read(buf) - require.NoError(t, err) - assert.Equal(t, []byte{1, 2, 3, 0}, buf) - - err = chunkReader.Close() - require.NoError(t, err) - }() - - _, err = pw.Write([]byte{9, 9, 9}) - require.Error(t, err) - assert.Equal(t, err, io.ErrClosedPipe) -} diff --git a/store/v2/snapshots/helpers_test.go b/store/v2/snapshots/helpers_test.go deleted file mode 100644 index 3157f35316..0000000000 --- a/store/v2/snapshots/helpers_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package snapshots_test - -import ( - "bufio" - "bytes" - "compress/zlib" - "crypto/sha256" - "errors" - "io" - "testing" - "time" - - protoio "github.com/cosmos/gogoproto/io" - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/store/v2/snapshots" - snapshotstypes "cosmossdk.io/store/v2/snapshots/types" -) - -func checksums(slice [][]byte) [][]byte { - hasher := sha256.New() - checksums := make([][]byte, len(slice)) - for i, chunk := range slice { - hasher.Write(chunk) - checksums[i] = hasher.Sum(nil) - hasher.Reset() - } - return checksums -} - -func hash(chunks [][]byte) []byte { - hasher := sha256.New() - for _, chunk := range chunks { - hasher.Write(chunk) - } - return hasher.Sum(nil) -} - -func makeChunks(chunks [][]byte) <-chan io.ReadCloser { - ch := make(chan io.ReadCloser, len(chunks)) - for _, chunk := range chunks { - ch <- io.NopCloser(bytes.NewReader(chunk)) - } - close(ch) - return ch -} - -func readChunks(chunks <-chan io.ReadCloser) [][]byte { - bodies := [][]byte{} - for chunk := range chunks { - body, err := io.ReadAll(chunk) - if err != nil { - panic(err) - } - bodies = append(bodies, body) - } - return bodies -} - -// snapshotItems serialize a array of bytes as SnapshotItem_ExtensionPayload, and return the chunks. 
-func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte { - // copy the same parameters from the code - snapshotChunkSize := uint64(10e6) - snapshotBufferSize := int(snapshotChunkSize) - - ch := make(chan io.ReadCloser) - go func() { - chunkWriter := snapshots.NewChunkWriter(ch, snapshotChunkSize) - bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize) - zWriter, _ := zlib.NewWriterLevel(bufWriter, 7) - protoWriter := protoio.NewDelimitedWriter(zWriter) - for _, item := range items { - _ = snapshotstypes.WriteExtensionPayload(protoWriter, item) - } - // write extension metadata - _ = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{ - Item: &snapshotstypes.SnapshotItem_Extension{ - Extension: &snapshotstypes.SnapshotExtensionMeta{ - Name: ext.SnapshotName(), - Format: ext.SnapshotFormat(), - }, - }, - }) - _ = ext.SnapshotExtension(0, func(payload []byte) error { - return snapshotstypes.WriteExtensionPayload(protoWriter, payload) - }) - _ = protoWriter.Close() - _ = bufWriter.Flush() - _ = chunkWriter.Close() - }() - - var chunks [][]byte - for chunkBody := range ch { - chunk, err := io.ReadAll(chunkBody) - if err != nil { - panic(err) - } - chunks = append(chunks, chunk) - } - - return chunks -} - -type mockCommitSnapshotter struct { - items [][]byte -} - -func (m *mockCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, -) (snapshotstypes.SnapshotItem, error) { - if format == 0 { - return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat - } - if m.items != nil { - return snapshotstypes.SnapshotItem{}, errors.New("already has contents") - } - - var item snapshotstypes.SnapshotItem - m.items = [][]byte{} - for { - item.Reset() - err := protoReader.ReadMsg(&item) - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return snapshotstypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message") - } - payload := item.GetExtensionPayload() - if payload == nil { - break - } - m.items = append(m.items, payload.Payload) - } - - return item, nil -} - -func (m *mockCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - for _, item := range m.items { - if err := snapshotstypes.WriteExtensionPayload(protoWriter, item); err != nil { - return err - } - } - return nil -} - -func (m *mockCommitSnapshotter) SnapshotFormat() uint32 { - return snapshotstypes.CurrentFormat -} - -func (m *mockCommitSnapshotter) SupportedFormats() []uint32 { - return []uint32{snapshotstypes.CurrentFormat} -} - -type mockStorageSnapshotter struct{} - -func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - return nil -} - -type mockErrorCommitSnapshotter struct{} - -var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil) - -func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - return errors.New("mock snapshot error") -} - -func (m *mockErrorCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, -) (snapshotstypes.SnapshotItem, error) { - return snapshotstypes.SnapshotItem{}, errors.New("mock restore error") -} - -func (m *mockErrorCommitSnapshotter) SnapshotFormat() uint32 { - return snapshotstypes.CurrentFormat -} - -func (m *mockErrorCommitSnapshotter) SupportedFormats() []uint32 { - return []uint32{snapshotstypes.CurrentFormat} -} - -// setupBusyManager creates a manager 
with an empty store that is busy creating a snapshot at height 1. -// The snapshot will complete when the returned closer is called. -func setupBusyManager(t *testing.T) *snapshots.Manager { - t.Helper() - store, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - hung := newHungCommitSnapshotter() - mgr := snapshots.NewManager(store, opts, hung, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - - // Channel to ensure the test doesn't finish until the goroutine is done. - // Without this, there are intermittent test failures about - // the t.TempDir() cleanup failing due to the directory not being empty. - done := make(chan struct{}) - - go func() { - defer close(done) - _, err := mgr.Create(1) - require.NoError(t, err) - }() - time.Sleep(10 * time.Millisecond) - - t.Cleanup(func() { - <-done - }) - - t.Cleanup(hung.Close) - - return mgr -} - -// hungCommitSnapshotter can be used to test operations in progress. Call close to end the snapshot. -type hungCommitSnapshotter struct { - ch chan struct{} -} - -func newHungCommitSnapshotter() *hungCommitSnapshotter { - return &hungCommitSnapshotter{ - ch: make(chan struct{}), - } -} - -func (m *hungCommitSnapshotter) Close() { - close(m.ch) -} - -func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error { - <-m.ch - return nil -} - -func (m *hungCommitSnapshotter) Restore( - height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges, -) (snapshotstypes.SnapshotItem, error) { - panic("not implemented") -} - -type extSnapshotter struct { - state []uint64 -} - -func newExtSnapshotter(count int) *extSnapshotter { - state := make([]uint64, 0, count) - for i := 0; i < count; i++ { - state = append(state, uint64(i)) - } - return &extSnapshotter{ - state, - } -} - -func (s *extSnapshotter) SnapshotName() string { - return "mock" -} - -func (s *extSnapshotter) SnapshotFormat() uint32 { - return 1 -} - -func (s *extSnapshotter) SupportedFormats() []uint32 { - return []uint32{1} -} - -func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) error { - for _, i := range s.state { - if err := payloadWriter(snapshotstypes.Uint64ToBigEndian(i)); err != nil { - return err - } - } - return nil -} - -func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error { - for { - payload, err := payloadReader() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - return err - } - s.state = append(s.state, snapshotstypes.BigEndianToUint64(payload)) - } - // finalize restoration - return nil -} diff --git a/store/v2/snapshots/manager.go b/store/v2/snapshots/manager.go deleted file mode 100644 index 3bee0a5832..0000000000 --- a/store/v2/snapshots/manager.go +++ /dev/null @@ -1,610 +0,0 @@ -package snapshots - -import ( - "bytes" - "crypto/sha256" - "errors" - "fmt" - "io" - "math" - "os" - "sort" - "sync" - - corelog "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - errorsmod "cosmossdk.io/errors" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/snapshots/types" -) - -// Manager manages snapshot and restore operations for an app, making sure only a single -// long-running operation is in progress at any given time, and provides convenience methods -// mirroring the ABCI interface. 
-// -// Although the ABCI interface (and this manager) passes chunks as byte slices, the internal -// snapshot/restore APIs use IO streams (i.e. chan io.ReadCloser), for two reasons: -// -// 1. In the future, ABCI should support streaming. Consider e.g. InitChain during chain -// upgrades, which currently passes the entire chain state as an in-memory byte slice. -// https://github.com/tendermint/tendermint/issues/5184 -// -// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary -// errors via io.Pipe.CloseWithError(). -type Manager struct { - extensions map[string]ExtensionSnapshotter - // store is the snapshot store where all completed snapshots are persisted. - store *Store - opts SnapshotOptions - // commitSnapshotter is the snapshotter for the commitment state. - commitSnapshotter CommitSnapshotter - // storageSnapshotter is the snapshotter for the storage state. - storageSnapshotter StorageSnapshotter - - logger corelog.Logger - - mtx sync.Mutex - operation operation - chRestore chan<- uint32 - chRestoreDone <-chan restoreDone - restoreSnapshot *types.Snapshot - restoreChunkIndex uint32 -} - -// operation represents a Manager operation. Only one operation can be in progress at a time. -type operation string - -// restoreDone represents the result of a restore operation. -type restoreDone struct { - complete bool // if true, restore completed successfully (not prematurely) - err error // if non-nil, restore errored -} - -const ( - opNone operation = "" - opSnapshot operation = "snapshot" - opPrune operation = "prune" - opRestore operation = "restore" - - chunkBufferSize = 4 - chunkIDBufferSize = 1024 - defaultStorageChannelBufferSize = 1024 - - snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit -) - -var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0") - -// NewManager creates a new manager. -func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, storageSnapshotter StorageSnapshotter, extensions map[string]ExtensionSnapshotter, logger corelog.Logger) *Manager { - if extensions == nil { - extensions = map[string]ExtensionSnapshotter{} - } - return &Manager{ - store: store, - opts: opts, - commitSnapshotter: commitSnapshotter, - storageSnapshotter: storageSnapshotter, - extensions: extensions, - logger: logger, - } -} - -// RegisterExtensions register extension snapshotters to manager -func (m *Manager) RegisterExtensions(extensions ...ExtensionSnapshotter) error { - if m.extensions == nil { - m.extensions = make(map[string]ExtensionSnapshotter, len(extensions)) - } - for _, extension := range extensions { - name := extension.SnapshotName() - if _, ok := m.extensions[name]; ok { - return fmt.Errorf("duplicated snapshotter name: %s", name) - } - if !IsFormatSupported(extension, extension.SnapshotFormat()) { - return fmt.Errorf("snapshotter don't support it's own snapshot format: %s %d", name, extension.SnapshotFormat()) - } - m.extensions[name] = extension - } - return nil -} - -// begin starts an operation, or errors if one is in progress. It manages the mutex itself. -func (m *Manager) begin(op operation) error { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.beginLocked(op) -} - -// beginLocked begins an operation while already holding the mutex. 
-func (m *Manager) beginLocked(op operation) error { - if op == opNone { - return errorsmod.Wrap(storeerrors.ErrLogic, "can't begin a none operation") - } - if m.operation != opNone { - return errorsmod.Wrapf(storeerrors.ErrConflict, "a %v operation is in progress", m.operation) - } - m.operation = op - return nil -} - -// end ends the current operation. -func (m *Manager) end() { - m.mtx.Lock() - defer m.mtx.Unlock() - m.endLocked() -} - -// endLocked ends the current operation while already holding the mutex. -func (m *Manager) endLocked() { - m.operation = opNone - if m.chRestore != nil { - close(m.chRestore) - m.chRestore = nil - } - m.chRestoreDone = nil - m.restoreSnapshot = nil - m.restoreChunkIndex = 0 -} - -// GetInterval returns snapshot interval represented in heights. -func (m *Manager) GetInterval() uint64 { - return m.opts.Interval -} - -// GetKeepRecent returns snapshot keep-recent represented in heights. -func (m *Manager) GetKeepRecent() uint32 { - return m.opts.KeepRecent -} - -// GetSnapshotBlockRetentionHeights returns the number of heights needed -// for block retention. Blocks since the oldest available snapshot must be -// available for state sync nodes to catch up (oldest because a node may be -// restoring an old snapshot while a new snapshot was taken). -func (m *Manager) GetSnapshotBlockRetentionHeights() int64 { - return int64(m.opts.Interval * uint64(m.opts.KeepRecent)) -} - -// Create creates a snapshot and returns its metadata. -func (m *Manager) Create(height uint64) (*types.Snapshot, error) { - if m == nil { - return nil, errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil") - } - - err := m.begin(opSnapshot) - if err != nil { - return nil, err - } - defer m.end() - - latest, err := m.store.GetLatest() - if err != nil { - return nil, errorsmod.Wrap(err, "failed to examine latest snapshot") - } - if latest != nil && latest.Height >= height { - return nil, errorsmod.Wrapf(storeerrors.ErrConflict, - "a more recent snapshot already exists at height %v", latest.Height) - } - - // Spawn goroutine to generate snapshot chunks and pass their io.ReadClosers through a channel - ch := make(chan io.ReadCloser) - go m.createSnapshot(height, ch) - - return m.store.Save(height, types.CurrentFormat, ch) -} - -// createSnapshot do the heavy work of snapshotting after the validations of request are done -// the produced chunks are written to the channel. -func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) { - streamWriter := NewStreamWriter(ch) - if streamWriter == nil { - return - } - defer func() { - if err := streamWriter.Close(); err != nil { - streamWriter.CloseWithError(err) - } - }() - - if err := m.commitSnapshotter.Snapshot(height, streamWriter); err != nil { - streamWriter.CloseWithError(err) - return - } - for _, name := range m.sortedExtensionNames() { - extension := m.extensions[name] - // write extension metadata - err := streamWriter.WriteMsg(&types.SnapshotItem{ - Item: &types.SnapshotItem_Extension{ - Extension: &types.SnapshotExtensionMeta{ - Name: name, - Format: extension.SnapshotFormat(), - }, - }, - }) - if err != nil { - streamWriter.CloseWithError(err) - return - } - payloadWriter := func(payload []byte) error { - return types.WriteExtensionPayload(streamWriter, payload) - } - if err := extension.SnapshotExtension(height, payloadWriter); err != nil { - streamWriter.CloseWithError(err) - return - } - } -} - -// CreateMigration creates a migration snapshot and writes it to the given writer. 
-// It is used to migrate the state from the original store to the store/v2. -func (m *Manager) CreateMigration(height uint64, protoWriter WriteCloser) error { - if m == nil { - return errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil") - } - - err := m.begin(opSnapshot) - if err != nil { - return err - } - // m.end() will be called by the migration manager with EndMigration(). - - go func() { - if err := m.commitSnapshotter.Snapshot(height, protoWriter); err != nil { - protoWriter.CloseWithError(err) - return - } - _ = protoWriter.Close() // always return nil - }() - - return nil -} - -// EndMigration ends the migration operation. -// It will replace the current commitSnapshotter with the new one. -func (m *Manager) EndMigration(commitSnapshotter CommitSnapshotter) { - defer m.end() - m.commitSnapshotter = commitSnapshotter -} - -// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations. -func (m *Manager) List() ([]*types.Snapshot, error) { - return m.store.List() -} - -// LoadChunk loads a chunk into a byte slice, mirroring ABCI LoadChunk. It can be called -// concurrently with other operations. If the chunk does not exist, nil is returned. -func (m *Manager) LoadChunk(height uint64, format, chunk uint32) ([]byte, error) { - reader, err := m.store.LoadChunk(height, format, chunk) - if err != nil { - return nil, err - } - if reader == nil { - return nil, nil - } - defer reader.Close() - - return io.ReadAll(reader) -} - -// Prune prunes snapshots, if no other operations are in progress. -func (m *Manager) Prune(retain uint32) (uint64, error) { - err := m.begin(opPrune) - if err != nil { - return 0, err - } - defer m.end() - return m.store.Prune(retain) -} - -// Restore begins an async snapshot restoration, mirroring ABCI OfferSnapshot. Chunks must be fed -// via RestoreChunk() until the restore is complete or a chunk fails. -func (m *Manager) Restore(snapshot types.Snapshot) error { - if snapshot.Chunks == 0 { - return errorsmod.Wrap(types.ErrInvalidMetadata, "no chunks") - } - if uint32(len(snapshot.Metadata.ChunkHashes)) != snapshot.Chunks { - return errorsmod.Wrapf(types.ErrInvalidMetadata, "snapshot has %v chunk hashes, but %v chunks", - uint32(len(snapshot.Metadata.ChunkHashes)), - snapshot.Chunks) - } - m.mtx.Lock() - defer m.mtx.Unlock() - - // check multistore supported format preemptive - if snapshot.Format != types.CurrentFormat { - return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format) - } - if snapshot.Height == 0 { - return errorsmod.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0") - } - if snapshot.Height > uint64(math.MaxInt64) { - return errorsmod.Wrapf(types.ErrInvalidMetadata, - "snapshot height %v cannot exceed %v", snapshot.Height, int64(math.MaxInt64)) - } - - err := m.beginLocked(opRestore) - if err != nil { - return err - } - - // Start an asynchronous snapshot restoration, passing chunks and completion status via channels. 
- chChunkIDs := make(chan uint32, chunkIDBufferSize) - chDone := make(chan restoreDone, 1) - - dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format) - if err := os.MkdirAll(dir, 0o750); err != nil { - return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir) - } - - chChunks := m.loadChunkStream(snapshot.Height, snapshot.Format, chChunkIDs) - - go func() { - err := m.doRestoreSnapshot(snapshot, chChunks) - chDone <- restoreDone{ - complete: err == nil, - err: err, - } - close(chDone) - }() - - m.chRestore = chChunkIDs - m.chRestoreDone = chDone - m.restoreSnapshot = &snapshot - m.restoreChunkIndex = 0 - return nil -} - -func (m *Manager) loadChunkStream(height uint64, format uint32, chunkIDs <-chan uint32) <-chan io.ReadCloser { - chunks := make(chan io.ReadCloser, chunkBufferSize) - go func() { - defer close(chunks) - - for chunkID := range chunkIDs { - chunk, err := m.store.loadChunkFile(height, format, chunkID) - if err != nil { - m.logger.Error("load chunk file failed", "height", height, "format", format, "chunk", chunkID, "err", err) - break - } - chunks <- chunk - } - }() - - return chunks -} - -// doRestoreSnapshot do the heavy work of snapshot restoration after preliminary checks on request have passed. -func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.ReadCloser) error { - dir := m.store.pathSnapshot(snapshot.Height, snapshot.Format) - if err := os.MkdirAll(dir, 0o750); err != nil { - return errorsmod.Wrapf(err, "failed to create snapshot directory %q", dir) - } - - var nextItem types.SnapshotItem - streamReader, err := NewStreamReader(chChunks) - if err != nil { - return err - } - defer streamReader.Close() - - // payloadReader reads an extension payload for extension snapshotter, it returns `io.EOF` at extension boundaries. - payloadReader := func() ([]byte, error) { - nextItem.Reset() - if err := streamReader.ReadMsg(&nextItem); err != nil { - return nil, err - } - payload := nextItem.GetExtensionPayload() - if payload == nil { - return nil, io.EOF - } - return payload.Payload, nil - } - - // chStorage is the channel to pass the KV pairs to the storage snapshotter. 
- chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize) - defer close(chStorage) - - storageErrs := make(chan error, 1) - go func() { - defer close(storageErrs) - err := m.storageSnapshotter.Restore(snapshot.Height, chStorage) - if err != nil { - storageErrs <- err - } - }() - - nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage) - if err != nil { - return errorsmod.Wrap(err, "multistore restore") - } - - for { - if nextItem.Item == nil { - // end of stream - break - } - metadata := nextItem.GetExtension() - if metadata == nil { - return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown snapshot item %T", nextItem.Item) - } - extension, ok := m.extensions[metadata.Name] - if !ok { - return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown extension snapshotter %s", metadata.Name) - } - if !IsFormatSupported(extension, metadata.Format) { - return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name) - } - - if err := extension.RestoreExtension(snapshot.Height, metadata.Format, payloadReader); err != nil { - return errorsmod.Wrapf(err, "extension %s restore", metadata.Name) - } - - if nextItem.GetExtensionPayload() != nil { - return fmt.Errorf("extension %s don't exhausted payload stream", metadata.Name) - } - } - - // wait for storage snapshotter to complete - if err := <-storageErrs; err != nil { - return errorsmod.Wrap(err, "storage snapshotter") - } - - return nil -} - -// RestoreChunk adds a chunk to an active snapshot restoration, mirroring ABCI ApplySnapshotChunk. -// Chunks must be given until the restore is complete, returning true, or a chunk errors. -func (m *Manager) RestoreChunk(chunk []byte) (bool, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - if m.operation != opRestore { - return false, errorsmod.Wrap(storeerrors.ErrLogic, "no restore operation in progress") - } - - if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) { - return false, errorsmod.Wrap(storeerrors.ErrLogic, "received unexpected chunk") - } - - // Check if any errors have occurred yet. - select { - case done := <-m.chRestoreDone: - m.endLocked() - if done.err != nil { - return false, done.err - } - return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended unexpectedly") - default: - } - - // Verify the chunk hash. - hash := sha256.Sum256(chunk) - expected := m.restoreSnapshot.Metadata.ChunkHashes[m.restoreChunkIndex] - if !bytes.Equal(hash[:], expected) { - return false, errorsmod.Wrapf(types.ErrChunkHashMismatch, - "expected %x, got %x", hash, expected) - } - - if err := m.store.saveChunkContent(chunk, m.restoreChunkIndex, m.restoreSnapshot); err != nil { - return false, errorsmod.Wrapf(err, "save chunk content %d", m.restoreChunkIndex) - } - - // Pass the chunk to the restore, and wait for completion if it was the final one. - m.chRestore <- m.restoreChunkIndex - m.restoreChunkIndex++ - - if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) { - close(m.chRestore) - m.chRestore = nil - - // the chunks are all written into files, we can save the snapshot to the db, - // even if the restoration may not completed yet. 
- if err := m.store.saveSnapshot(m.restoreSnapshot); err != nil { - return false, errorsmod.Wrap(err, "save restoring snapshot") - } - - done := <-m.chRestoreDone - m.endLocked() - if done.err != nil { - return false, done.err - } - if !done.complete { - return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended prematurely") - } - - return true, nil - } - return false, nil -} - -// RestoreLocalSnapshot restores app state from a local snapshot. -func (m *Manager) RestoreLocalSnapshot(height uint64, format uint32) error { - snapshot, ch, err := m.store.Load(height, format) - if err != nil { - return err - } - - if snapshot == nil { - return fmt.Errorf("snapshot doesn't exist, height: %d, format: %d", height, format) - } - - m.mtx.Lock() - defer m.mtx.Unlock() - - err = m.beginLocked(opRestore) - if err != nil { - return err - } - defer m.endLocked() - - return m.doRestoreSnapshot(*snapshot, ch) -} - -// sortedExtensionNames sort extension names for deterministic iteration. -func (m *Manager) sortedExtensionNames() []string { - names := make([]string, 0, len(m.extensions)) - for name := range m.extensions { - names = append(names, name) - } - - sort.Strings(names) - return names -} - -// IsFormatSupported returns if the snapshotter supports restoration from given format. -func IsFormatSupported(snapshotter ExtensionSnapshotter, format uint32) bool { - for _, i := range snapshotter.SupportedFormats() { - if i == format { - return true - } - } - return false -} - -// SnapshotIfApplicable takes a snapshot of the current state if we are on a snapshot height. -// It also prunes any old snapshots. -func (m *Manager) SnapshotIfApplicable(height int64) { - if m == nil { - return - } - if !m.shouldTakeSnapshot(height) { - m.logger.Debug("snapshot is skipped", "height", height) - return - } - // start the routine after need to create a snapshot - go m.snapshot(height) -} - -// shouldTakeSnapshot returns true is snapshot should be taken at height. -func (m *Manager) shouldTakeSnapshot(height int64) bool { - return m.opts.Interval > 0 && uint64(height)%m.opts.Interval == 0 -} - -func (m *Manager) snapshot(height int64) { - m.logger.Info("creating state snapshot", "height", height) - - if height <= 0 { - m.logger.Error("snapshot height must be positive", "height", height) - return - } - - snapshot, err := m.Create(uint64(height)) - if err != nil { - m.logger.Error("failed to create state snapshot", "height", height, "err", err) - return - } - - m.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format) - - if m.opts.KeepRecent > 0 { - m.logger.Debug("pruning state snapshots") - - pruned, err := m.Prune(m.opts.KeepRecent) - if err != nil { - m.logger.Error("Failed to prune state snapshots", "err", err) - return - } - - m.logger.Debug("pruned state snapshots", "pruned", pruned) - } -} - -// Close the snapshot database. 
-func (m *Manager) Close() error { return nil } diff --git a/store/v2/snapshots/manager_test.go b/store/v2/snapshots/manager_test.go deleted file mode 100644 index 4fd33e1b36..0000000000 --- a/store/v2/snapshots/manager_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package snapshots_test - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/snapshots" - "cosmossdk.io/store/v2/snapshots/types" -) - -var opts = snapshots.NewSnapshotOptions(1500, 2) - -func TestManager_List(t *testing.T) { - store := setupStore(t) - commitSnapshotter := &mockCommitSnapshotter{} - storageSnapshotter := &mockStorageSnapshotter{} - manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, coretesting.NewNopLogger()) - - mgrList, err := manager.List() - require.NoError(t, err) - storeList, err := store.List() - require.NoError(t, err) - - require.NotEmpty(t, storeList) - assert.Equal(t, storeList, mgrList) - - // list should not block or error on busy managers - manager = setupBusyManager(t) - list, err := manager.List() - require.NoError(t, err) - assert.Equal(t, []*types.Snapshot{}, list) - - require.NoError(t, manager.Close()) -} - -func TestManager_LoadChunk(t *testing.T) { - store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - - // Existing chunk should return body - chunk, err := manager.LoadChunk(2, 1, 1) - require.NoError(t, err) - assert.Equal(t, []byte{2, 1, 1}, chunk) - - // Missing chunk should return nil - chunk, err = manager.LoadChunk(2, 1, 9) - require.NoError(t, err) - assert.Nil(t, chunk) - - // LoadChunk should not block or error on busy managers - manager = setupBusyManager(t) - chunk, err = manager.LoadChunk(2, 1, 0) - require.NoError(t, err) - assert.Nil(t, chunk) -} - -func TestManager_Take(t *testing.T) { - store := setupStore(t) - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // nil manager should return error - _, err = (*snapshots.Manager)(nil).Create(1) - require.Error(t, err) - - // creating a snapshot at a lower height than the latest should error - _, err = manager.Create(3) - require.Error(t, err) - - // creating a snapshot at a higher height should be fine, and should return it - snapshot, err := manager.Create(5) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - // creating a snapshot while a different snapshot is being created should error - manager = 
setupBusyManager(t) - _, err = manager.Create(9) - require.Error(t, err) -} - -func TestManager_Prune(t *testing.T) { - store := setupStore(t) - manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - - pruned, err := manager.Prune(2) - require.NoError(t, err) - assert.EqualValues(t, 1, pruned) - - list, err := manager.List() - require.NoError(t, err) - assert.Len(t, list, 3) - - // Prune should error while a snapshot is being taken - manager = setupBusyManager(t) - _, err = manager.Prune(2) - require.Error(t, err) -} - -func TestManager_Restore(t *testing.T) { - store := setupStore(t) - target := &mockCommitSnapshotter{} - extSnapshotter := newExtSnapshotter(0) - manager := snapshots.NewManager(store, opts, target, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - expectItems := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - - chunks := snapshotItems(expectItems, newExtSnapshotter(10)) - - // Restore errors on invalid format - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: 0, - Hash: []byte{1, 2, 3}, - Chunks: uint32(len(chunks)), - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - require.ErrorIs(t, err, types.ErrUnknownFormat) - - // Restore errors on no chunks - err = manager.Restore(types.Snapshot{Height: 3, Format: types.CurrentFormat, Hash: []byte{1, 2, 3}}) - require.Error(t, err) - - // Restore errors on chunk and chunkhashes mismatch - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 4, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - - // Starting a restore works - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 1, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.NoError(t, err) - - // While the restore is in progress, any other operations fail - _, err = manager.Create(4) - require.Error(t, err) - - _, err = manager.Prune(1) - require.Error(t, err) - - // Feeding an invalid chunk should error due to invalid checksum, but not abort restoration. - _, err = manager.RestoreChunk([]byte{9, 9, 9}) - require.Error(t, err) - require.True(t, errors.Is(err, types.ErrChunkHashMismatch)) - - // Feeding the chunks should work - for i, chunk := range chunks { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - assert.Equal(t, expectItems, target.items) - assert.Equal(t, 10, len(extSnapshotter.state)) - - // The snapshot is saved in local snapshot store - snapshots, err := store.List() - require.NoError(t, err) - snapshot := snapshots[0] - require.Equal(t, uint64(3), snapshot.Height) - require.Equal(t, types.CurrentFormat, snapshot.Format) - - // Starting a new restore should fail now, because the target already has contents. - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 3, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.Error(t, err) - - // But if we clear out the target we should be able to start a new restore. This time we'll - // fail it with a checksum error. That error should stop the operation, so that we can do - // a prune operation right after. 
- target.items = nil - err = manager.Restore(types.Snapshot{ - Height: 3, - Format: types.CurrentFormat, - Hash: []byte{1, 2, 3}, - Chunks: 1, - Metadata: types.Metadata{ChunkHashes: checksums(chunks)}, - }) - require.NoError(t, err) - - // Feeding the chunks should work - for i, chunk := range chunks { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } -} - -func TestManager_TakeError(t *testing.T) { - snapshotter := &mockErrorCommitSnapshotter{} - store, err := snapshots.NewStore(t.TempDir()) - require.NoError(t, err) - manager := snapshots.NewManager(store, opts, snapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - - _, err = manager.Create(1) - require.Error(t, err) -} - -func TestSnapshot_Take_Restore(t *testing.T) { - store := setupStore(t) - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // creating a snapshot at a higher height should be fine, and should return it - snapshot, err := manager.Create(5) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 5, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - err = manager.Restore(*snapshot) - require.NoError(t, err) - - // Feeding the chunks should work - for i, chunk := range readChunks(chunks) { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - // The snapshot is saved in local snapshot store - snapshots, err := store.List() - require.NoError(t, err) - require.Equal(t, uint64(5), snapshots[0].Height) - require.Equal(t, types.CurrentFormat, snapshots[0].Format) - - // Starting a new restore should fail now, because the target already has contents. 
- err = manager.Restore(*snapshot) - require.Error(t, err) - - storeSnapshot, chunks, err = store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - // Feeding the chunks should work - for i, chunk := range readChunks(chunks) { - done, err := manager.RestoreChunk(chunk) - require.NoError(t, err) - if i == len(chunks)-1 { - assert.True(t, done) - } else { - assert.False(t, done) - } - } - - assert.Equal(t, items, commitSnapshotter.items) - assert.Equal(t, 10, len(extSnapshotter.state)) - - snapshots, err = store.List() - require.NoError(t, err) - require.Equal(t, uint64(5), snapshots[0].Height) - require.Equal(t, types.CurrentFormat, snapshots[0].Format) -} - -func TestSnapshot_Take_Prune(t *testing.T) { - store := setupStore(t) - - items := [][]byte{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - commitSnapshotter := &mockCommitSnapshotter{ - items: items, - } - extSnapshotter := newExtSnapshotter(10) - - expectChunks := snapshotItems(items, extSnapshotter) - manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, coretesting.NewNopLogger()) - err := manager.RegisterExtensions(extSnapshotter) - require.NoError(t, err) - - // creating a snapshot at height 4 - snapshot, err := manager.Create(4) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - pruned, err := manager.Prune(1) - require.NoError(t, err) - assert.EqualValues(t, 4, pruned) - - // creating a snapshot at a same height 4, should be error - // since we prune all the previous snapshot except the latest at height 4 - _, err = manager.Create(4) - require.Error(t, err) - - // prune all - pruned, err = manager.Prune(0) - require.NoError(t, err) - assert.EqualValues(t, 1, pruned) - - // creating a snapshot at a same height 4, should be true since we prune all the previous snapshot - snapshot, err = manager.Create(4) - require.NoError(t, err) - - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: commitSnapshotter.SnapshotFormat(), - Chunks: 1, - Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b}, - Metadata: types.Metadata{ - ChunkHashes: checksums(expectChunks), - }, - }, snapshot) - - storeSnapshot, chunks, err := store.Load(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, storeSnapshot) - assert.Equal(t, expectChunks, readChunks(chunks)) - - pruned, err = manager.Prune(2) - require.NoError(t, err) - assert.EqualValues(t, 0, pruned) - - list, err := manager.List() - require.NoError(t, err) - assert.Len(t, list, 1) - - // Prune should error while a snapshot is being taken - manager = setupBusyManager(t) - _, err = manager.Prune(2) - require.Error(t, err) -} diff --git a/store/v2/snapshots/options.go b/store/v2/snapshots/options.go deleted file mode 100644 index 565a0ce105..0000000000 --- a/store/v2/snapshots/options.go +++ /dev/null @@ -1,18 +0,0 @@ -package snapshots - -// SnapshotOptions defines the snapshot strategy 
diff --git a/store/v2/snapshots/options.go b/store/v2/snapshots/options.go
deleted file mode 100644
index 565a0ce105..0000000000
--- a/store/v2/snapshots/options.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package snapshots
-
-// SnapshotOptions defines the snapshot strategy used when determining which
-// heights are snapshotted for state sync.
-type SnapshotOptions struct {
-	// Interval defines at which heights the snapshot is taken.
-	Interval uint64
-
-	// KeepRecent defines how many recent heights to keep snapshots for.
-	KeepRecent uint32
-}
-
-func NewSnapshotOptions(interval uint64, keepRecent uint32) SnapshotOptions {
-	return SnapshotOptions{
-		Interval:   interval,
-		KeepRecent: keepRecent,
-	}
-}
diff --git a/store/v2/snapshots/snapshotter.go b/store/v2/snapshots/snapshotter.go
deleted file mode 100644
index 9b054060a3..0000000000
--- a/store/v2/snapshots/snapshotter.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package snapshots
-
-import (
-	protoio "github.com/cosmos/gogoproto/io"
-
-	corestore "cosmossdk.io/core/store"
-	"cosmossdk.io/store/v2/snapshots/types"
-)
-
-// CommitSnapshotter defines an API for creating and restoring snapshots of the
-// commitment state.
-type CommitSnapshotter interface {
-	// Snapshot writes a snapshot of the commitment state at the given version.
-	Snapshot(version uint64, protoWriter protoio.Writer) error
-
-	// Restore restores the commitment state from the snapshot reader.
-	Restore(version uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges) (types.SnapshotItem, error)
-}
-
-// StorageSnapshotter defines an API for restoring snapshots of the storage state.
-type StorageSnapshotter interface {
-	// Restore restores the storage state from the given channel.
-	Restore(version uint64, chStorage <-chan *corestore.StateChanges) error
-}
-
-// ExtensionPayloadReader reads extension payloads; it returns io.EOF when it
-// reaches either the end of the stream or the extension boundaries.
-type ExtensionPayloadReader = func() ([]byte, error)
-
-// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream.
-type ExtensionPayloadWriter = func([]byte) error
-
-// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
-// An ExtensionSnapshotter has a unique name and manages its own internal formats.
-type ExtensionSnapshotter interface {
-	// SnapshotName returns the name of the snapshotter; it must be unique within the manager.
-	SnapshotName() string
-
-	// SnapshotFormat returns the default format the extension snapshotter uses to encode the
-	// payloads when taking a snapshot.
-	// It's defined within the extension, separate from the global format for the whole state-sync snapshot.
-	SnapshotFormat() uint32
-
-	// SupportedFormats returns a list of formats it can restore from.
-	SupportedFormats() []uint32
-
-	// SnapshotExtension writes extension payloads into the underlying protobuf stream.
-	SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error
-
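The extension contract is easiest to see with a concrete implementation. A minimal sketch follows, assuming only the interface declared here; widgetSnapshotter is hypothetical, not from this diff:

package widgets

import (
	"errors"
	"io"

	"cosmossdk.io/store/v2/snapshots"
)

// widgetSnapshotter is a hypothetical extension that snapshots a single blob.
type widgetSnapshotter struct {
	state []byte
}

var _ snapshots.ExtensionSnapshotter = (*widgetSnapshotter)(nil)

func (w *widgetSnapshotter) SnapshotName() string       { return "widgets" }
func (w *widgetSnapshotter) SnapshotFormat() uint32     { return 1 }
func (w *widgetSnapshotter) SupportedFormats() []uint32 { return []uint32{1} }

func (w *widgetSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) error {
	// Write the whole state as a single payload; a larger extension would
	// call payloadWriter once per item.
	return payloadWriter(w.state)
}

func (w *widgetSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error {
	for {
		payload, err := payloadReader()
		if errors.Is(err, io.EOF) { // end of this extension's payloads
			return nil
		}
		if err != nil {
			return err
		}
		w.state = append([]byte(nil), payload...)
	}
}

-
-	// RestoreExtension restores an extension state snapshot;
-	// the payload reader returns `io.EOF` when it reaches the extension boundaries.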
- RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error -} diff --git a/store/v2/snapshots/store.go b/store/v2/snapshots/store.go deleted file mode 100644 index 50c8e1dbd6..0000000000 --- a/store/v2/snapshots/store.go +++ /dev/null @@ -1,439 +0,0 @@ -package snapshots - -import ( - "crypto/sha256" - "encoding/binary" - "fmt" - "hash" - "io" - "math" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - - "github.com/cosmos/gogoproto/proto" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/errors" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/snapshots/types" -) - -const ( - // keyPrefixSnapshot is the prefix for snapshot database keys - keyPrefixSnapshot byte = 0x01 -) - -// Store is a snapshot store, containing snapshot metadata and binary chunks. -type Store struct { - dir string - - mtx sync.Mutex - saving map[uint64]bool // heights currently being saved -} - -// NewStore creates a new snapshot store. -func NewStore(dir string) (*Store, error) { - if dir == "" { - return nil, errors.Wrap(storeerrors.ErrLogic, "snapshot directory not given") - } - err := os.MkdirAll(dir, 0o755) - if err != nil { - return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir) - } - err = os.MkdirAll(filepath.Join(dir, "metadata"), 0o750) - if err != nil { - return nil, errors.Wrapf(err, "failed to create snapshot metadata directory %q", dir) - } - - return &Store{ - dir: dir, - saving: make(map[uint64]bool), - }, nil -} - -// Delete deletes a snapshot. -func (s *Store) Delete(height uint64, format uint32) error { - s.mtx.Lock() - saving := s.saving[height] - s.mtx.Unlock() - if saving { - return errors.Wrapf(storeerrors.ErrConflict, - "snapshot for height %v format %v is currently being saved", height, format) - } - if err := os.RemoveAll(s.pathSnapshot(height, format)); err != nil { - return errors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v", height, format) - } - if err := os.RemoveAll(s.pathMetadata(height, format)); err != nil { - return errors.Wrapf(err, "failed to delete snapshot metadata for height %v format %v", height, format) - } - return nil -} - -// Get fetches snapshot info from the database. -func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) { - if _, err := os.Stat(s.pathMetadata(height, format)); os.IsNotExist(err) { - return nil, nil - } - bytes, err := os.ReadFile(s.pathMetadata(height, format)) - if err != nil { - return nil, errors.Wrapf(err, "failed to fetch snapshot metadata for height %v format %v", - height, format) - } - snapshot := &types.Snapshot{} - err = proto.Unmarshal(bytes, snapshot) - if err != nil { - return nil, errors.Wrapf(err, "failed to decode snapshot metadata for height %v format %v", - height, format) - } - if snapshot.Metadata.ChunkHashes == nil { - snapshot.Metadata.ChunkHashes = [][]byte{} - } - return snapshot, nil -} - -// GetLatest fetches the latest snapshot from the database, if any. 
-func (s *Store) GetLatest() (*types.Snapshot, error) { - metadata, err := os.ReadDir(s.pathMetadataDir()) - if err != nil { - return nil, errors.Wrap(err, "failed to list snapshot metadata") - } - if len(metadata) == 0 { - return nil, nil - } - // file system may not guarantee the order of the files, so we sort them lexically - sort.Slice(metadata, func(i, j int) bool { return metadata[i].Name() < metadata[j].Name() }) - - path := filepath.Join(s.pathMetadataDir(), metadata[len(metadata)-1].Name()) - if err := s.validateMetadataPath(path); err != nil { - return nil, err - } - bz, err := os.ReadFile(path) - if err != nil { - return nil, errors.Wrapf(err, "failed to read latest snapshot metadata %s", path) - } - - snapshot := &types.Snapshot{} - err = proto.Unmarshal(bz, snapshot) - if err != nil { - return nil, errors.Wrapf(err, "failed to decode latest snapshot metadata %s", path) - } - return snapshot, nil -} - -// List lists snapshots, in reverse order (newest first). -func (s *Store) List() ([]*types.Snapshot, error) { - metadata, err := os.ReadDir(s.pathMetadataDir()) - if err != nil { - return nil, errors.Wrap(err, "failed to list snapshot metadata") - } - // file system may not guarantee the order of the files, so we sort them lexically - sort.Slice(metadata, func(i, j int) bool { return metadata[i].Name() < metadata[j].Name() }) - - snapshots := make([]*types.Snapshot, len(metadata)) - for i, entry := range metadata { - path := filepath.Join(s.pathMetadataDir(), entry.Name()) - if err := s.validateMetadataPath(path); err != nil { - return nil, err - } - bz, err := os.ReadFile(path) - if err != nil { - return nil, errors.Wrapf(err, "failed to read snapshot metadata %s", entry.Name()) - } - snapshot := &types.Snapshot{} - err = proto.Unmarshal(bz, snapshot) - if err != nil { - return nil, errors.Wrapf(err, "failed to decode snapshot metadata %s", entry.Name()) - } - snapshots[len(metadata)-1-i] = snapshot - } - return snapshots, nil -} - -// Load loads a snapshot (both metadata and binary chunks). The chunks must be consumed and closed. -// Returns nil if the snapshot does not exist. -func (s *Store) Load(height uint64, format uint32) (*types.Snapshot, <-chan io.ReadCloser, error) { - snapshot, err := s.Get(height, format) - if snapshot == nil || err != nil { - return nil, nil, err - } - - ch := make(chan io.ReadCloser) - go func() { - defer close(ch) - for i := uint32(0); i < snapshot.Chunks; i++ { - pr, pw := io.Pipe() - ch <- pr - chunk, err := s.loadChunkFile(height, format, i) - if err != nil { - _ = pw.CloseWithError(err) - return - } - defer chunk.Close() - _, err = io.Copy(pw, chunk) - if err != nil { - _ = pw.CloseWithError(err) - return - } - chunk.Close() - pw.Close() - } - }() - - return snapshot, ch, nil -} - -// LoadChunk loads a chunk from disk, or returns nil if it does not exist. The caller must call -// Close() on it when done. -func (s *Store) LoadChunk(height uint64, format, chunk uint32) (io.ReadCloser, error) { - path := s.PathChunk(height, format, chunk) - file, err := os.Open(path) - if os.IsNotExist(err) { - return nil, nil - } - return file, err -} - -// loadChunkFile loads a chunk from disk, and errors if it does not exist. -func (s *Store) loadChunkFile(height uint64, format, chunk uint32) (io.ReadCloser, error) { - path := s.PathChunk(height, format, chunk) - return os.Open(path) -} - -// Prune removes old snapshots. The given number of most recent heights (regardless of format) are retained. 
-func (s *Store) Prune(retain uint32) (uint64, error) { - metadata, err := os.ReadDir(s.pathMetadataDir()) - if err != nil { - return 0, errors.Wrap(err, "failed to list snapshot metadata") - } - - pruned := uint64(0) - prunedHeights := make(map[uint64]bool) - skip := make(map[uint64]bool) - for i := len(metadata) - 1; i >= 0; i-- { - height, format, err := s.parseMetadataFilename(metadata[i].Name()) - if err != nil { - return 0, err - } - - if skip[height] || uint32(len(skip)) < retain { - skip[height] = true - continue - } - err = s.Delete(height, format) - if err != nil { - return 0, errors.Wrap(err, "failed to prune snapshots") - } - pruned++ - prunedHeights[height] = true - } - // Since Delete() deletes a specific format, while we want to prune a height, we clean up - // the height directory as well - for height, ok := range prunedHeights { - if ok { - err = os.Remove(s.pathHeight(height)) - if err != nil { - return 0, errors.Wrapf(err, "failed to remove snapshot directory for height %v", height) - } - } - } - return pruned, nil -} - -// Save saves a snapshot to disk, returning it. -func (s *Store) Save( - height uint64, format uint32, chunks <-chan io.ReadCloser, -) (*types.Snapshot, error) { - defer DrainChunks(chunks) - if height == 0 { - return nil, errors.Wrap(storeerrors.ErrLogic, "snapshot height cannot be 0") - } - - s.mtx.Lock() - saving := s.saving[height] - s.saving[height] = true - s.mtx.Unlock() - if saving { - return nil, errors.Wrapf(storeerrors.ErrConflict, - "a snapshot for height %v is already being saved", height) - } - defer func() { - s.mtx.Lock() - delete(s.saving, height) - s.mtx.Unlock() - }() - - snapshot := &types.Snapshot{ - Height: height, - Format: format, - } - - // create height directory or do nothing - if err := os.MkdirAll(s.pathHeight(height), 0o750); err != nil { - return nil, errors.Wrapf(err, "failed to create snapshot directory for height %v", height) - } - // create format directory or fail (if for example the format directory already exists) - if err := os.Mkdir(s.pathSnapshot(height, format), 0o750); err != nil { - return nil, errors.Wrapf(err, "failed to create snapshot directory for height %v format %v", height, format) - } - - index := uint32(0) - snapshotHasher := sha256.New() - chunkHasher := sha256.New() - for chunkBody := range chunks { - if err := s.saveChunk(chunkBody, index, snapshot, chunkHasher, snapshotHasher); err != nil { - return nil, err - } - index++ - } - snapshot.Chunks = index - snapshot.Hash = snapshotHasher.Sum(nil) - return snapshot, s.saveSnapshot(snapshot) -} - -// saveChunk saves the given chunkBody with the given index to its appropriate path on disk. -// The hash of the chunk is appended to the snapshot's metadata, -// and the overall snapshot hash is updated with the chunk content too. 
-func (s *Store) saveChunk(chunkBody io.ReadCloser, index uint32, snapshot *types.Snapshot, chunkHasher, snapshotHasher hash.Hash) error {
-	defer chunkBody.Close()
-
-	path := s.PathChunk(snapshot.Height, snapshot.Format, index)
-	chunkFile, err := os.Create(path)
-	if err != nil {
-		return errors.Wrapf(err, "failed to create snapshot chunk file %q", path)
-	}
-	defer chunkFile.Close()
-
-	chunkHasher.Reset()
-	if _, err := io.Copy(io.MultiWriter(chunkFile, chunkHasher, snapshotHasher), chunkBody); err != nil {
-		return errors.Wrapf(err, "failed to generate snapshot chunk %d", index)
-	}
-
-	if err := chunkFile.Close(); err != nil {
-		return errors.Wrapf(err, "failed to close snapshot chunk file %d", index)
-	}
-
-	if err := chunkBody.Close(); err != nil {
-		return errors.Wrapf(err, "failed to close snapshot chunk body %d", index)
-	}
-
-	snapshot.Metadata.ChunkHashes = append(snapshot.Metadata.ChunkHashes, chunkHasher.Sum(nil))
-	return nil
-}
-
-// saveChunkContent saves the chunk to disk
-func (s *Store) saveChunkContent(chunk []byte, index uint32, snapshot *types.Snapshot) error {
-	path := s.PathChunk(snapshot.Height, snapshot.Format, index)
-	return os.WriteFile(path, chunk, 0o600)
-}
-
-// saveSnapshot saves snapshot metadata to the database.
-func (s *Store) saveSnapshot(snapshot *types.Snapshot) error {
-	value, err := proto.Marshal(snapshot)
-	if err != nil {
-		return errors.Wrap(err, "failed to encode snapshot metadata")
-	}
-	err = os.WriteFile(s.pathMetadata(snapshot.Height, snapshot.Format), value, 0o600)
-	if err != nil {
-		return errors.Wrap(err, "failed to write snapshot metadata")
-	}
-	return nil
-}
-
-// pathHeight generates the path to a height, containing multiple snapshot formats.
-func (s *Store) pathHeight(height uint64) string {
-	return filepath.Join(s.dir, strconv.FormatUint(height, 10))
-}
-
-// pathSnapshot generates a snapshot path, as a specific format under a height.
-func (s *Store) pathSnapshot(height uint64, format uint32) string {
-	return filepath.Join(s.pathHeight(height), strconv.FormatUint(uint64(format), 10))
-}
-
-func (s *Store) pathMetadataDir() string {
-	return filepath.Join(s.dir, "metadata")
-}
-
-// pathMetadata generates a snapshot metadata path.
-func (s *Store) pathMetadata(height uint64, format uint32) string {
-	return filepath.Join(s.pathMetadataDir(), fmt.Sprintf("%020d-%08d", height, format))
-}
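Together, these helpers fix the store's on-disk layout: chunk i of a snapshot lives at <dir>/<height>/<format>/<i>, while its metadata file lives under <dir>/metadata/ with zero-padded height and format in the name. A small, self-contained illustration of the naming scheme (the height/format values are made up):

package main

import "fmt"

func main() {
	// Zero-padding makes metadata filenames sort lexicographically by
	// height and then format, which is what List and GetLatest rely on.
	fmt.Printf("%020d-%08d\n", uint64(5), uint32(3))
	// Output: 00000000000000000005-00000003
}

-
-// PathChunk generates a snapshot chunk path.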
-func (s *Store) PathChunk(height uint64, format, chunk uint32) string { - return filepath.Join(s.pathSnapshot(height, format), strconv.FormatUint(uint64(chunk), 10)) -} - -func (s *Store) parseMetadataFilename(filename string) (height uint64, format uint32, err error) { - parts := strings.Split(filename, "-") - if len(parts) != 2 { - return 0, 0, fmt.Errorf("invalid snapshot metadata filename %s", filename) - } - height, err = strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return 0, 0, errors.Wrapf(err, "invalid snapshot metadata filename %s", filename) - } - var f uint64 - f, err = strconv.ParseUint(parts[1], 10, 32) - if err != nil { - return 0, 0, errors.Wrapf(err, "invalid snapshot metadata filename %s", filename) - } - format = uint32(f) - if filename != filepath.Base(s.pathMetadata(height, format)) { - return 0, 0, fmt.Errorf("invalid snapshot metadata filename %s", filename) - } - return height, format, nil -} - -func (s *Store) validateMetadataPath(path string) error { - dir, f := filepath.Split(path) - if dir != fmt.Sprintf("%s/", s.pathMetadataDir()) { - return fmt.Errorf("invalid snapshot metadata path %s", path) - } - _, _, err := s.parseMetadataFilename(f) - return err -} - -// legacyV1DecodeKey decodes a legacy snapshot key used in a raw kv store. -func legacyV1DecodeKey(k []byte) (uint64, uint32, error) { - if len(k) != 13 { - return 0, 0, errors.Wrapf(storeerrors.ErrLogic, "invalid snapshot key with length %v", len(k)) - } - if k[0] != keyPrefixSnapshot { - return 0, 0, errors.Wrapf(storeerrors.ErrLogic, "invalid snapshot key prefix %x", k[0]) - } - - height := binary.BigEndian.Uint64(k[1:9]) - format := binary.BigEndian.Uint32(k[9:13]) - return height, format, nil -} - -// legacyV1EncodeKey encodes a snapshot key for use in a raw kv store. 
-func legacyV1EncodeKey(height uint64, format uint32) []byte {
-	k := make([]byte, 13)
-	k[0] = keyPrefixSnapshot
-	binary.BigEndian.PutUint64(k[1:], height)
-	binary.BigEndian.PutUint32(k[9:], format)
-	return k
-}
-
-func (s *Store) MigrateFromV1(db corestore.KVStore) error {
-	itr, err := db.Iterator(legacyV1EncodeKey(0, 0), legacyV1EncodeKey(math.MaxUint64, math.MaxUint32))
-	if err != nil {
-		return err
-	}
-	defer itr.Close()
-	for ; itr.Valid(); itr.Next() {
-		height, format, err := legacyV1DecodeKey(itr.Key())
-		if err != nil {
-			return err
-		}
-		if err := os.WriteFile(s.pathMetadata(height, format), itr.Value(), 0o600); err != nil {
-			return errors.Wrapf(err, "failed to write snapshot metadata %q", s.pathMetadata(height, format))
-		}
-	}
-	return nil
-}
diff --git a/store/v2/snapshots/store_test.go b/store/v2/snapshots/store_test.go
deleted file mode 100644
index 0b11d12ddc..0000000000
--- a/store/v2/snapshots/store_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package snapshots_test
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"cosmossdk.io/store/v2/snapshots"
-	"cosmossdk.io/store/v2/snapshots/types"
-)
-
-func setupStore(t *testing.T) *snapshots.Store {
-	t.Helper()
-	store, err := snapshots.NewStore(t.TempDir())
-	require.NoError(t, err)
-
-	_, err = store.Save(1, 1, makeChunks([][]byte{
-		{1, 1, 0}, {1, 1, 1},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(2, 1, makeChunks([][]byte{
-		{2, 1, 0}, {2, 1, 1},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(2, 2, makeChunks([][]byte{
-		{2, 2, 0}, {2, 2, 1}, {2, 2, 2},
-	}))
-	require.NoError(t, err)
-	_, err = store.Save(3, 2, makeChunks([][]byte{
-		{3, 2, 0}, {3, 2, 1}, {3, 2, 2},
-	}))
-	require.NoError(t, err)
-
-	return store
-}
-
-func TestNewStore(t *testing.T) {
-	_, err := snapshots.NewStore(t.TempDir())
-
-	require.NoError(t, err)
-}
-
-func TestNewStore_ErrNoDir(t *testing.T) {
-	_, err := snapshots.NewStore("")
-	require.Error(t, err)
-}
-
-func TestStore_Delete(t *testing.T) {
-	store := setupStore(t)
-	// Deleting a snapshot should remove it
-	err := store.Delete(2, 2)
-	require.NoError(t, err)
-
-	snapshot, err := store.Get(2, 2)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-
-	snapshots, err := store.List()
-	require.NoError(t, err)
-	assert.Len(t, snapshots, 3)
-
-	// Deleting it again should not error
-	err = store.Delete(2, 2)
-	require.NoError(t, err)
-
-	// Deleting a snapshot being saved should error
-	ch := make(chan io.ReadCloser)
-	go func() {
-		_, err := store.Save(9, 1, ch)
-		require.NoError(t, err)
-	}()
-
-	time.Sleep(10 * time.Millisecond)
-	err = store.Delete(9, 1)
-	require.Error(t, err)
-
-	// But after it's saved it should work
-	close(ch)
-	time.Sleep(10 * time.Millisecond)
-	err = store.Delete(9, 1)
-	require.NoError(t, err)
-}
-
-func TestStore_Get(t *testing.T) {
-	store := setupStore(t)
-
-	// Loading a missing snapshot should return nil
-	snapshot, err := store.Get(9, 9)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-
-	// Loading a snapshot should return its metadata
-	snapshot, err = store.Get(2, 1)
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 2,
-		Format: 1,
-		Chunks: 2,
-		Hash:   hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{2, 1, 0}, {2, 1, 1},
-			}),
-		},
-	}, snapshot)
-}
-
-func TestStore_GetLatest(t *testing.T) {
-	store := setupStore(t)
-	// GetLatest should return the newest snapshot
-	snapshot, err := store.GetLatest()
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 3,
-		Format: 2,
-		Chunks: 3,
-		Hash: hash([][]byte{
-			{3, 2, 0},
-			{3, 2, 1},
-			{3, 2, 2},
-		}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{3, 2, 0},
-				{3, 2, 1},
-				{3, 2, 2},
-			}),
-		},
-	}, snapshot)
-}
-
-func TestStore_List(t *testing.T) {
-	store := setupStore(t)
-	snapshots, err := store.List()
-	require.NoError(t, err)
-
-	require.Equal(t, []*types.Snapshot{
-		{
-			Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})},
-		},
-		{
-			Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})},
-		},
-		{
-			Height: 1, Format: 1, Chunks: 2, Hash: hash([][]byte{{1, 1, 0}, {1, 1, 1}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{1, 1, 0}, {1, 1, 1}})},
-		},
-	}, snapshots)
-}
-
-func TestStore_Load(t *testing.T) {
-	store := setupStore(t)
-	// Loading a missing snapshot should return nil
-	snapshot, chunks, err := store.Load(9, 9)
-	require.NoError(t, err)
-	assert.Nil(t, snapshot)
-	assert.Nil(t, chunks)
-
-	// Loading a snapshot should return its metadata and chunks
-	snapshot, chunks, err = store.Load(2, 1)
-	require.NoError(t, err)
-	assert.Equal(t, &types.Snapshot{
-		Height: 2,
-		Format: 1,
-		Chunks: 2,
-		Hash:   hash([][]byte{{2, 1, 0}, {2, 1, 1}}),
-		Metadata: types.Metadata{
-			ChunkHashes: checksums([][]byte{
-				{2, 1, 0}, {2, 1, 1},
-			}),
-		},
-	}, snapshot)
-
-	for i := uint32(0); i < snapshot.Chunks; i++ {
-		reader, ok := <-chunks
-		require.True(t, ok)
-		chunk, err := io.ReadAll(reader)
-		require.NoError(t, err)
-		err = reader.Close()
-		require.NoError(t, err)
-		assert.Equal(t, []byte{2, 1, byte(i)}, chunk)
-	}
-	assert.Empty(t, chunks)
-}
-
-func TestStore_LoadChunk(t *testing.T) {
-	store := setupStore(t)
-	// Loading a chunk from a missing snapshot should return nil
-	chunk, err := store.LoadChunk(9, 9, 0)
-	require.NoError(t, err)
-	assert.Nil(t, chunk)
-
-	// Loading a missing chunk index should return nil
-	chunk, err = store.LoadChunk(2, 1, 2)
-	require.NoError(t, err)
-	require.Nil(t, chunk)
-
-	// Loading a chunk should return a content reader
-	chunk, err = store.LoadChunk(2, 1, 0)
-	require.NoError(t, err)
-	require.NotNil(t, chunk)
-	body, err := io.ReadAll(chunk)
-	require.NoError(t, err)
-	assert.Equal(t, []byte{2, 1, 0}, body)
-	err = chunk.Close()
-	require.NoError(t, err)
-}
-
-func TestStore_Prune(t *testing.T) {
-	store := setupStore(t)
-	// Pruning too many snapshots should be fine
-	pruned, err := store.Prune(4)
-	require.NoError(t, err)
-	assert.EqualValues(t, 0, pruned)
-
-	snapshots, err := store.List()
-	require.NoError(t, err)
-	assert.Len(t, snapshots, 4)
-
-	// Pruning until the last two heights should leave three snapshots (for two heights)
-	pruned, err = store.Prune(2)
-	require.NoError(t, err)
-	assert.EqualValues(t, 1, pruned)
-
-	snapshots, err = store.List()
-	require.NoError(t, err)
-	require.Equal(t, []*types.Snapshot{
-		{
-			Height: 3, Format: 2, Chunks: 3, Hash: hash([][]byte{{3, 2, 0}, {3, 2, 1}, {3, 2, 2}}),
-			Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{3, 2, 0}, {3, 
2, 1}, {3, 2, 2}})}, - }, - { - Height: 2, Format: 2, Chunks: 3, Hash: hash([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}}), - Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 2, 0}, {2, 2, 1}, {2, 2, 2}})}, - }, - { - Height: 2, Format: 1, Chunks: 2, Hash: hash([][]byte{{2, 1, 0}, {2, 1, 1}}), - Metadata: types.Metadata{ChunkHashes: checksums([][]byte{{2, 1, 0}, {2, 1, 1}})}, - }, - }, snapshots) - - // Pruning all heights should also be fine - pruned, err = store.Prune(0) - require.NoError(t, err) - assert.EqualValues(t, 3, pruned) - - snapshots, err = store.List() - require.NoError(t, err) - assert.Empty(t, snapshots) -} - -func TestStore_Save(t *testing.T) { - t.Parallel() - store := setupStore(t) - // Saving a snapshot should work - snapshot, err := store.Save(4, 1, makeChunks([][]byte{{1}, {2}})) - require.NoError(t, err) - assert.Equal(t, &types.Snapshot{ - Height: 4, - Format: 1, - Chunks: 2, - Hash: hash([][]byte{{1}, {2}}), - Metadata: types.Metadata{ - ChunkHashes: checksums([][]byte{{1}, {2}}), - }, - }, snapshot) - loaded, err := store.Get(snapshot.Height, snapshot.Format) - require.NoError(t, err) - assert.Equal(t, snapshot, loaded) - - // Saving an existing snapshot should error - _, err = store.Save(4, 1, makeChunks([][]byte{{1}, {2}})) - require.Error(t, err) - - // Saving at height 0 should error - _, err = store.Save(0, 1, makeChunks([][]byte{{1}, {2}})) - require.Error(t, err) - - // Saving at format 0 should be fine - _, err = store.Save(1, 0, makeChunks([][]byte{{1}, {2}})) - require.NoError(t, err) - - // Saving a snapshot with no chunks should be fine, as should loading it - _, err = store.Save(5, 1, makeChunks([][]byte{})) - require.NoError(t, err) - snapshot, chunks, err := store.Load(5, 1) - require.NoError(t, err) - assert.Equal(t, &types.Snapshot{Height: 5, Format: 1, Hash: hash([][]byte{}), Metadata: types.Metadata{ChunkHashes: [][]byte{}}}, snapshot) - assert.Empty(t, chunks) - - // Saving a snapshot should error if a chunk reader returns an error, and it should empty out - // the channel - someErr := errors.New("boom") - pr, pw := io.Pipe() - err = pw.CloseWithError(someErr) - require.NoError(t, err) - - ch := make(chan io.ReadCloser, 2) - ch <- pr - ch <- io.NopCloser(bytes.NewBuffer([]byte{0xff})) - close(ch) - - _, err = store.Save(6, 1, ch) - require.Error(t, err) - require.True(t, errors.Is(err, someErr)) - assert.Empty(t, ch) - - // Saving a snapshot should error if a snapshot is already in progress for the same height, - // regardless of format. However, a different height should succeed. 
-	var (
-		wgStart, wgDone sync.WaitGroup
-		mu              sync.Mutex
-		gotErrHeights   []uint64
-	)
-	srcHeights := []uint64{7, 7, 7, 8, 9}
-	wgStart.Add(len(srcHeights))
-	wgDone.Add(len(srcHeights))
-	for _, h := range srcHeights {
-		ch = make(chan io.ReadCloser, 1)
-		ch <- &ReadCloserMock{} // does not block on a buffered channel
-		close(ch)
-		go func(height uint64) {
-			wgStart.Done()
-			wgStart.Wait() // wait for all routines to start
-			if _, err = store.Save(height, 1, ch); err != nil {
-				mu.Lock()
-				gotErrHeights = append(gotErrHeights, height)
-				mu.Unlock()
-			}
-			wgDone.Done()
-		}(h)
-	}
-	wgDone.Wait() // wait for all routines to complete
-	assert.Equal(t, []uint64{7, 7}, gotErrHeights)
-}
-
-type ReadCloserMock struct{}
-
-func (r ReadCloserMock) Read(p []byte) (n int, err error) {
-	return len(p), io.EOF
-}
-
-func (r ReadCloserMock) Close() error {
-	return nil
-}
diff --git a/store/v2/snapshots/stream.go b/store/v2/snapshots/stream.go
deleted file mode 100644
index 4662d138b2..0000000000
--- a/store/v2/snapshots/stream.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package snapshots
-
-import (
-	"bufio"
-	"compress/zlib"
-	"io"
-
-	protoio "github.com/cosmos/gogoproto/io"
-	"github.com/cosmos/gogoproto/proto"
-
-	"cosmossdk.io/errors"
-)
-
-const (
-	// Do not change chunk size without a new snapshot format (must be uniform across nodes)
-	snapshotChunkSize  = uint64(10e6)
-	snapshotBufferSize = int(snapshotChunkSize)
-	// Do not change compression level without a new snapshot format (must be uniform across nodes)
-	snapshotCompressionLevel = 7
-)
-
-type WriteCloser interface {
-	protoio.WriteCloser
-
-	// CloseWithError closes the writer and sends an error to the reader.
-	CloseWithError(err error)
-}
-
-// StreamWriter sets up a stream pipeline to serialize snapshot nodes:
-// Exported Items -> delimited Protobuf -> zlib -> buffer -> chunkWriter -> chan io.ReadCloser
-type StreamWriter struct {
-	chunkWriter *ChunkWriter
-	bufWriter   *bufio.Writer
-	zWriter     *zlib.Writer
-	protoWriter protoio.WriteCloser
-}
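How the two ends of this pipeline are typically driven can be sketched as follows; writeItems and readItems are hypothetical helpers in the same package, not part of the deleted file:

package snapshots

import (
	"fmt"
	"io"

	"github.com/cosmos/gogoproto/proto"
)

// writeItems streams msgs through a StreamWriter; compressed chunks appear
// on ch as io.ReadClosers for the consumer (e.g. Store.Save).
func writeItems(ch chan<- io.ReadCloser, msgs []proto.Message) error {
	sw := NewStreamWriter(ch)
	if sw == nil { // the constructor reports its error through ch and returns nil
		return fmt.Errorf("failed to set up snapshot stream writer")
	}
	for _, msg := range msgs {
		if err := sw.WriteMsg(msg); err != nil {
			sw.CloseWithError(err)
			return err
		}
	}
	// Close flushes the compression and buffer layers and finishes the last chunk.
	return sw.Close()
}

// readItems is the mirror image on the restore path.
func readItems(chunks <-chan io.ReadCloser, msgs []proto.Message) error {
	sr, err := NewStreamReader(chunks)
	if err != nil {
		return err
	}
	defer sr.Close()
	for _, msg := range msgs {
		// ReadMsg decompresses and decodes the next length-delimited message.
		if err := sr.ReadMsg(msg); err != nil {
			return err
		}
	}
	return nil
}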
-
-// NewStreamWriter sets up a stream pipeline to serialize snapshot DB records.
-func NewStreamWriter(ch chan<- io.ReadCloser) *StreamWriter {
-	chunkWriter := NewChunkWriter(ch, snapshotChunkSize)
-	bufWriter := bufio.NewWriterSize(chunkWriter, snapshotBufferSize)
-	zWriter, err := zlib.NewWriterLevel(bufWriter, snapshotCompressionLevel)
-	if err != nil {
-		chunkWriter.CloseWithError(errors.Wrap(err, "zlib failure"))
-		return nil
-	}
-	protoWriter := protoio.NewDelimitedWriter(zWriter)
-	return &StreamWriter{
-		chunkWriter: chunkWriter,
-		bufWriter:   bufWriter,
-		zWriter:     zWriter,
-		protoWriter: protoWriter,
-	}
-}
-
-// WriteMsg implements the protoio.Writer interface
-func (sw *StreamWriter) WriteMsg(msg proto.Message) error {
-	return sw.protoWriter.WriteMsg(msg)
-}
-
-// Close implements the io.Closer interface
-func (sw *StreamWriter) Close() error {
-	if err := sw.protoWriter.Close(); err != nil {
-		sw.chunkWriter.CloseWithError(err)
-		return err
-	}
-	if err := sw.bufWriter.Flush(); err != nil {
-		sw.chunkWriter.CloseWithError(err)
-		return err
-	}
-	return sw.chunkWriter.Close()
-}
-
-// CloseWithError passes the error to the chunkWriter
-func (sw *StreamWriter) CloseWithError(err error) {
-	sw.chunkWriter.CloseWithError(err)
-}
-
-// StreamReader sets up a restore stream pipeline:
-// chan io.ReadCloser -> chunkReader -> zlib -> delimited Protobuf -> ExportNode
-type StreamReader struct {
-	chunkReader *ChunkReader
-	zReader     io.ReadCloser
-	protoReader protoio.ReadCloser
-}
-
-// NewStreamReader sets up a restore stream pipeline.
-func NewStreamReader(chunks <-chan io.ReadCloser) (*StreamReader, error) {
-	chunkReader := NewChunkReader(chunks)
-	zReader, err := zlib.NewReader(chunkReader)
-	if err != nil {
-		return nil, errors.Wrap(err, "zlib failure")
-	}
-	protoReader := protoio.NewDelimitedReader(zReader, snapshotMaxItemSize)
-	return &StreamReader{
-		chunkReader: chunkReader,
-		zReader:     zReader,
-		protoReader: protoReader,
-	}, nil
-}
-
-// ReadMsg implements the protoio.Reader interface
-func (sr *StreamReader) ReadMsg(msg proto.Message) error {
-	return sr.protoReader.ReadMsg(msg)
-}
-
-// Close implements the io.Closer interface
-func (sr *StreamReader) Close() error {
-	var err error
-	if err1 := sr.protoReader.Close(); err1 != nil {
-		err = err1
-	}
-	if err2 := sr.zReader.Close(); err2 != nil {
-		err = err2
-	}
-	if err3 := sr.chunkReader.Close(); err3 != nil {
-		err = err3
-	}
-	return err
-}
diff --git a/store/v2/snapshots/types/errors.go b/store/v2/snapshots/types/errors.go
deleted file mode 100644
index c1b5db532e..0000000000
--- a/store/v2/snapshots/types/errors.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package types
-
-import (
-	"errors"
-)
-
-var (
-	// ErrUnknownFormat is returned when an unknown format is used.
-	ErrUnknownFormat = errors.New("unknown snapshot format")
-
-	// ErrChunkHashMismatch is returned when chunk hash verification fails.
-	ErrChunkHashMismatch = errors.New("chunk hash verification failed")
-
-	// ErrInvalidMetadata is returned when the snapshot metadata is invalid.
-	ErrInvalidMetadata = errors.New("invalid snapshot metadata")
-
-	// ErrInvalidSnapshotVersion is returned when the snapshot version is invalid.
-	ErrInvalidSnapshotVersion = errors.New("invalid snapshot version")
-)
diff --git a/store/v2/snapshots/types/format.go b/store/v2/snapshots/types/format.go
deleted file mode 100644
index 317b6a6e32..0000000000
--- a/store/v2/snapshots/types/format.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package types
-
-// CurrentFormat is the currently used format for snapshots.
Snapshots using the same format -// must be identical across all nodes for a given height, so this must be bumped when the binary -// snapshot output changes. -const CurrentFormat uint32 = 3 diff --git a/store/v2/snapshots/types/snapshot.pb.go b/store/v2/snapshots/types/snapshot.pb.go deleted file mode 100644 index 686e11905a..0000000000 --- a/store/v2/snapshots/types/snapshot.pb.go +++ /dev/null @@ -1,2014 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cosmos/store/snapshots/v1/snapshot.proto - -package types - -import ( - fmt "fmt" - _ "github.com/cosmos/gogoproto/gogoproto" - proto "github.com/cosmos/gogoproto/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Snapshot contains Tendermint state sync snapshot info. -type Snapshot struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` - Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` - Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{0} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetHeight() uint64 { - if m != nil { - return m.Height - } - return 0 -} - -func (m *Snapshot) GetFormat() uint32 { - if m != nil { - return m.Format - } - return 0 -} - -func (m *Snapshot) GetChunks() uint32 { - if m != nil { - return m.Chunks - } - return 0 -} - -func (m *Snapshot) GetHash() []byte { - if m != nil { - return m.Hash - } - return nil -} - -func (m *Snapshot) GetMetadata() Metadata { - if m != nil { - return m.Metadata - } - return Metadata{} -} - -// Metadata contains SDK-specific snapshot metadata. 
-type Metadata struct { - ChunkHashes [][]byte `protobuf:"bytes,1,rep,name=chunk_hashes,json=chunkHashes,proto3" json:"chunk_hashes,omitempty"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetChunkHashes() [][]byte { - if m != nil { - return m.ChunkHashes - } - return nil -} - -// SnapshotItem is an item contained in a rootmulti.Store snapshot. -// -// Since: cosmos-sdk 0.46 -type SnapshotItem struct { - // item is the specific type of snapshot item. - // - // Types that are valid to be assigned to Item: - // - // *SnapshotItem_Store - // *SnapshotItem_IAVL - // *SnapshotItem_Extension - // *SnapshotItem_ExtensionPayload - Item isSnapshotItem_Item `protobuf_oneof:"item"` -} - -func (m *SnapshotItem) Reset() { *m = SnapshotItem{} } -func (m *SnapshotItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotItem) ProtoMessage() {} -func (*SnapshotItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{2} -} -func (m *SnapshotItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotItem.Merge(m, src) -} -func (m *SnapshotItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotItem proto.InternalMessageInfo - -type isSnapshotItem_Item interface { - isSnapshotItem_Item() - MarshalTo([]byte) (int, error) - Size() int -} - -type SnapshotItem_Store struct { - Store *SnapshotStoreItem `protobuf:"bytes,1,opt,name=store,proto3,oneof" json:"store,omitempty"` -} -type SnapshotItem_IAVL struct { - IAVL *SnapshotIAVLItem `protobuf:"bytes,2,opt,name=iavl,proto3,oneof" json:"iavl,omitempty"` -} -type SnapshotItem_Extension struct { - Extension *SnapshotExtensionMeta `protobuf:"bytes,3,opt,name=extension,proto3,oneof" json:"extension,omitempty"` -} -type SnapshotItem_ExtensionPayload struct { - ExtensionPayload *SnapshotExtensionPayload `protobuf:"bytes,4,opt,name=extension_payload,json=extensionPayload,proto3,oneof" json:"extension_payload,omitempty"` -} - -func (*SnapshotItem_Store) isSnapshotItem_Item() {} -func (*SnapshotItem_IAVL) isSnapshotItem_Item() {} -func (*SnapshotItem_Extension) isSnapshotItem_Item() {} -func (*SnapshotItem_ExtensionPayload) 
isSnapshotItem_Item() {} - -func (m *SnapshotItem) GetItem() isSnapshotItem_Item { - if m != nil { - return m.Item - } - return nil -} - -func (m *SnapshotItem) GetStore() *SnapshotStoreItem { - if x, ok := m.GetItem().(*SnapshotItem_Store); ok { - return x.Store - } - return nil -} - -func (m *SnapshotItem) GetIAVL() *SnapshotIAVLItem { - if x, ok := m.GetItem().(*SnapshotItem_IAVL); ok { - return x.IAVL - } - return nil -} - -func (m *SnapshotItem) GetExtension() *SnapshotExtensionMeta { - if x, ok := m.GetItem().(*SnapshotItem_Extension); ok { - return x.Extension - } - return nil -} - -func (m *SnapshotItem) GetExtensionPayload() *SnapshotExtensionPayload { - if x, ok := m.GetItem().(*SnapshotItem_ExtensionPayload); ok { - return x.ExtensionPayload - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SnapshotItem) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SnapshotItem_Store)(nil), - (*SnapshotItem_IAVL)(nil), - (*SnapshotItem_Extension)(nil), - (*SnapshotItem_ExtensionPayload)(nil), - } -} - -// SnapshotStoreItem contains metadata about a snapshotted store. -// -// Since: cosmos-sdk 0.46 -type SnapshotStoreItem struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (m *SnapshotStoreItem) Reset() { *m = SnapshotStoreItem{} } -func (m *SnapshotStoreItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotStoreItem) ProtoMessage() {} -func (*SnapshotStoreItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{3} -} -func (m *SnapshotStoreItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotStoreItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotStoreItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotStoreItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotStoreItem.Merge(m, src) -} -func (m *SnapshotStoreItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotStoreItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotStoreItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotStoreItem proto.InternalMessageInfo - -func (m *SnapshotStoreItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// SnapshotIAVLItem is an exported IAVL node. -// -// Since: cosmos-sdk 0.46 -type SnapshotIAVLItem struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - // version is block height - Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` - // height is depth of the tree. 
- Height int32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` -} - -func (m *SnapshotIAVLItem) Reset() { *m = SnapshotIAVLItem{} } -func (m *SnapshotIAVLItem) String() string { return proto.CompactTextString(m) } -func (*SnapshotIAVLItem) ProtoMessage() {} -func (*SnapshotIAVLItem) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{4} -} -func (m *SnapshotIAVLItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotIAVLItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotIAVLItem.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotIAVLItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotIAVLItem.Merge(m, src) -} -func (m *SnapshotIAVLItem) XXX_Size() int { - return m.Size() -} -func (m *SnapshotIAVLItem) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotIAVLItem.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotIAVLItem proto.InternalMessageInfo - -func (m *SnapshotIAVLItem) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *SnapshotIAVLItem) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *SnapshotIAVLItem) GetVersion() int64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *SnapshotIAVLItem) GetHeight() int32 { - if m != nil { - return m.Height - } - return 0 -} - -// SnapshotExtensionMeta contains metadata about an external snapshotter. -// -// Since: cosmos-sdk 0.46 -type SnapshotExtensionMeta struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` -} - -func (m *SnapshotExtensionMeta) Reset() { *m = SnapshotExtensionMeta{} } -func (m *SnapshotExtensionMeta) String() string { return proto.CompactTextString(m) } -func (*SnapshotExtensionMeta) ProtoMessage() {} -func (*SnapshotExtensionMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{5} -} -func (m *SnapshotExtensionMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotExtensionMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotExtensionMeta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotExtensionMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotExtensionMeta.Merge(m, src) -} -func (m *SnapshotExtensionMeta) XXX_Size() int { - return m.Size() -} -func (m *SnapshotExtensionMeta) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotExtensionMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotExtensionMeta proto.InternalMessageInfo - -func (m *SnapshotExtensionMeta) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *SnapshotExtensionMeta) GetFormat() uint32 { - if m != nil { - return m.Format - } - return 0 -} - -// SnapshotExtensionPayload contains payloads of an external snapshotter. 
-// -// Since: cosmos-sdk 0.46 -type SnapshotExtensionPayload struct { - Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (m *SnapshotExtensionPayload) Reset() { *m = SnapshotExtensionPayload{} } -func (m *SnapshotExtensionPayload) String() string { return proto.CompactTextString(m) } -func (*SnapshotExtensionPayload) ProtoMessage() {} -func (*SnapshotExtensionPayload) Descriptor() ([]byte, []int) { - return fileDescriptor_3d5cca1aa5b69183, []int{6} -} -func (m *SnapshotExtensionPayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotExtensionPayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotExtensionPayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotExtensionPayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotExtensionPayload.Merge(m, src) -} -func (m *SnapshotExtensionPayload) XXX_Size() int { - return m.Size() -} -func (m *SnapshotExtensionPayload) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotExtensionPayload.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotExtensionPayload proto.InternalMessageInfo - -func (m *SnapshotExtensionPayload) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func init() { - proto.RegisterType((*Snapshot)(nil), "cosmos.store.snapshots.v1.Snapshot") - proto.RegisterType((*Metadata)(nil), "cosmos.store.snapshots.v1.Metadata") - proto.RegisterType((*SnapshotItem)(nil), "cosmos.store.snapshots.v1.SnapshotItem") - proto.RegisterType((*SnapshotStoreItem)(nil), "cosmos.store.snapshots.v1.SnapshotStoreItem") - proto.RegisterType((*SnapshotIAVLItem)(nil), "cosmos.store.snapshots.v1.SnapshotIAVLItem") - proto.RegisterType((*SnapshotExtensionMeta)(nil), "cosmos.store.snapshots.v1.SnapshotExtensionMeta") - proto.RegisterType((*SnapshotExtensionPayload)(nil), "cosmos.store.snapshots.v1.SnapshotExtensionPayload") -} - -func init() { - proto.RegisterFile("cosmos/store/snapshots/v1/snapshot.proto", fileDescriptor_3d5cca1aa5b69183) -} - -var fileDescriptor_3d5cca1aa5b69183 = []byte{ - // 496 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0x8e, 0xd7, 0xb4, 0x74, 0x2f, 0x41, 0xea, 0xac, 0x81, 0x02, 0x87, 0x2c, 0x84, 0x03, 0x91, - 0x80, 0x94, 0x65, 0x1c, 0xb9, 0x50, 0x98, 0x94, 0x09, 0x90, 0x26, 0x4f, 0xe2, 0xc0, 0x65, 0xf2, - 0x56, 0xd3, 0x44, 0x6d, 0xe2, 0xaa, 0xf6, 0x22, 0xfa, 0x2f, 0xf8, 0x23, 0xfc, 0x8f, 0x1d, 0x77, - 0xe4, 0x34, 0x50, 0xfb, 0x47, 0x90, 0xed, 0x26, 0xa0, 0x6d, 0x45, 0xdb, 0xed, 0x7d, 0x2f, 0xdf, - 0xf7, 0xf9, 0xf9, 0xcb, 0x33, 0x44, 0xa7, 0x5c, 0x14, 0x5c, 0xf4, 0x85, 0xe4, 0x33, 0xd6, 0x17, - 0x25, 0x9d, 0x8a, 0x8c, 0x4b, 0xd1, 0xaf, 0x76, 0x1b, 0x10, 0x4f, 0x67, 0x5c, 0x72, 0xfc, 0xc8, - 0x30, 0x63, 0xcd, 0x8c, 0x1b, 0x66, 0x5c, 0xed, 0x3e, 0xde, 0x1e, 0xf1, 0x11, 0xd7, 0xac, 0xbe, - 0xaa, 0x8c, 0x20, 0xfc, 0x81, 0xa0, 0x7b, 0xb4, 0xa2, 0xe1, 0x87, 0xd0, 0xc9, 0x58, 0x3e, 0xca, - 0xa4, 0x87, 0x02, 0x14, 0xd9, 0x64, 0x85, 0x54, 0xff, 0x2b, 0x9f, 0x15, 0x54, 0x7a, 0x1b, 0x01, - 0x8a, 0xee, 0x93, 0x15, 0x52, 0xfd, 0xd3, 0xec, 0xac, 0x1c, 0x0b, 0xaf, 0x65, 0xfa, 0x06, 0x61, - 0x0c, 0x76, 0x46, 0x45, 0xe6, 0xd9, 0x01, 0x8a, 0x5c, 0xa2, 0x6b, 0xbc, 0x0f, 0xdd, 0x82, 0x49, - 0x3a, 0xa4, 0x92, 0x7a, 0xed, 0x00, 0x45, 0x4e, 0xf2, 0x34, 
0x5e, 0x3b, 0x6c, 0xfc, 0x69, 0x45, - 0x1d, 0xd8, 0xe7, 0x97, 0x3b, 0x16, 0x69, 0xa4, 0xe1, 0x4b, 0xe8, 0xd6, 0xdf, 0xf0, 0x13, 0x70, - 0xf5, 0x81, 0xc7, 0xea, 0x00, 0x26, 0x3c, 0x14, 0xb4, 0x22, 0x97, 0x38, 0xba, 0x97, 0xea, 0x56, - 0xf8, 0x6b, 0x03, 0xdc, 0xfa, 0x7a, 0x07, 0x92, 0x15, 0xf8, 0x3d, 0xb4, 0xf5, 0x71, 0xfa, 0x86, - 0x4e, 0xf2, 0xe2, 0x3f, 0x33, 0xd4, 0xba, 0x23, 0xf5, 0x49, 0x89, 0x53, 0x8b, 0x18, 0x31, 0xfe, - 0x00, 0x76, 0x4e, 0xab, 0x89, 0x8e, 0xc3, 0x49, 0x9e, 0xdf, 0xc2, 0xe4, 0xe0, 0xed, 0xe7, 0x8f, - 0xca, 0x63, 0xd0, 0x5d, 0x5c, 0xee, 0xd8, 0x0a, 0xa5, 0x16, 0xd1, 0x26, 0xf8, 0x10, 0x36, 0xd9, - 0x37, 0xc9, 0x4a, 0x91, 0xf3, 0x52, 0x07, 0xe9, 0x24, 0xaf, 0x6e, 0xe1, 0xb8, 0x5f, 0x6b, 0x54, - 0x1e, 0xa9, 0x45, 0xfe, 0x9a, 0xe0, 0x13, 0xd8, 0x6a, 0xc0, 0xf1, 0x94, 0xce, 0x27, 0x9c, 0x0e, - 0xf5, 0xcf, 0x70, 0x92, 0xbd, 0xbb, 0x38, 0x1f, 0x1a, 0x69, 0x6a, 0x91, 0x1e, 0xbb, 0xd2, 0x1b, - 0x74, 0xc0, 0xce, 0x25, 0x2b, 0xc2, 0x67, 0xb0, 0x75, 0x2d, 0x28, 0xb5, 0x00, 0x25, 0x2d, 0x4c, - 0xc8, 0x9b, 0x44, 0xd7, 0xe1, 0x04, 0x7a, 0x57, 0xc3, 0xc0, 0x3d, 0x68, 0x8d, 0xd9, 0x5c, 0xd3, - 0x5c, 0xa2, 0x4a, 0xbc, 0x0d, 0xed, 0x8a, 0x4e, 0xce, 0x98, 0x8e, 0xd6, 0x25, 0x06, 0x60, 0x0f, - 0xee, 0x55, 0x6c, 0xd6, 0x04, 0xd4, 0x22, 0x35, 0xfc, 0x67, 0x65, 0xd5, 0xfd, 0xda, 0xf5, 0xca, - 0x86, 0xef, 0xe0, 0xc1, 0x8d, 0x41, 0xdd, 0x34, 0xda, 0xba, 0xfd, 0x0e, 0x5f, 0x83, 0xb7, 0x2e, - 0x13, 0x35, 0x52, 0x9d, 0xac, 0x19, 0xbf, 0x86, 0x83, 0x37, 0xe7, 0x0b, 0x1f, 0x5d, 0x2c, 0x7c, - 0xf4, 0x7b, 0xe1, 0xa3, 0xef, 0x4b, 0xdf, 0xba, 0x58, 0xfa, 0xd6, 0xcf, 0xa5, 0x6f, 0x7d, 0x09, - 0x4d, 0xf6, 0x62, 0x38, 0x8e, 0x73, 0x7e, 0xed, 0x35, 0xcb, 0xf9, 0x94, 0x89, 0x93, 0x8e, 0x7e, - 0x97, 0x7b, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xef, 0xe9, 0x8e, 0x10, 0xf4, 0x03, 0x00, 0x00, -} - -func (m *Snapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x22 - } - if m.Chunks != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Chunks)) - i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 - } - if m.Height != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ChunkHashes) > 0 { - for iNdEx := len(m.ChunkHashes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ChunkHashes[iNdEx]) - copy(dAtA[i:], 
m.ChunkHashes[iNdEx]) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.ChunkHashes[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SnapshotItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Item != nil { - { - size := m.Item.Size() - i -= size - if _, err := m.Item.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *SnapshotItem_Store) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_Store) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Store != nil { - { - size, err := m.Store.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_IAVL) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_IAVL) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.IAVL != nil { - { - size, err := m.IAVL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_Extension) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_Extension) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Extension != nil { - { - size, err := m.Extension.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *SnapshotItem_ExtensionPayload) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotItem_ExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ExtensionPayload != nil { - { - size, err := m.ExtensionPayload.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshot(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *SnapshotStoreItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotStoreItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotStoreItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotIAVLItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotIAVLItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotIAVLItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Height != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Version != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x18 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotExtensionMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotExtensionMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotExtensionMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Format != 0 { - i = encodeVarintSnapshot(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SnapshotExtensionPayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SnapshotExtensionPayload) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotExtensionPayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshot(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovSnapshot(uint64(m.Height)) - } - if m.Format != 0 { - n += 1 + sovSnapshot(uint64(m.Format)) - } - if m.Chunks != 0 { - n += 1 + sovSnapshot(uint64(m.Chunks)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovSnapshot(uint64(l)) - return n -} - -func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ChunkHashes) > 0 { - for _, b := range m.ChunkHashes { - l = len(b) - n += 1 + l + sovSnapshot(uint64(l)) - } - } - return n -} - -func (m *SnapshotItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Item != nil { - n += m.Item.Size() - } - return n -} - -func (m *SnapshotItem_Store) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if 
m.Store != nil { - l = m.Store.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_IAVL) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IAVL != nil { - l = m.IAVL.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_Extension) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Extension != nil { - l = m.Extension.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotItem_ExtensionPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExtensionPayload != nil { - l = m.ExtensionPayload.Size() - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} -func (m *SnapshotStoreItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} - -func (m *SnapshotIAVLItem) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.Version != 0 { - n += 1 + sovSnapshot(uint64(m.Version)) - } - if m.Height != 0 { - n += 1 + sovSnapshot(uint64(m.Height)) - } - return n -} - -func (m *SnapshotExtensionMeta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.Format != 0 { - n += 1 + sovSnapshot(uint64(m.Format)) - } - return n -} - -func (m *SnapshotExtensionPayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Payload) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - return n -} - -func sovSnapshot(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshot(x uint64) (n int) { - return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) - } - m.Chunks = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Chunks |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChunkHashes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChunkHashes = append(m.ChunkHashes, make([]byte, postIndex-iNdEx)) - copy(m.ChunkHashes[len(m.ChunkHashes)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotStoreItem{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_Store{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IAVL", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotIAVLItem{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_IAVL{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SnapshotExtensionMeta{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_Extension{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtensionPayload", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := 
&SnapshotExtensionPayload{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Item = &SnapshotItem_ExtensionPayload{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotStoreItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotStoreItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotStoreItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotIAVLItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotIAVLItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotIAVLItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotExtensionMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotExtensionMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotExtensionMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= 
uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotExtensionPayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotExtensionPayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotExtensionPayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) - if m.Payload == nil { - m.Payload = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnapshot(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnapshot - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnapshot - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnapshot - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - 
ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group") -) diff --git a/store/v2/snapshots/types/util.go b/store/v2/snapshots/types/util.go deleted file mode 100644 index 4ffeb73375..0000000000 --- a/store/v2/snapshots/types/util.go +++ /dev/null @@ -1,35 +0,0 @@ -package types - -import ( - "encoding/binary" - - protoio "github.com/cosmos/gogoproto/io" -) - -// WriteExtensionPayload writes an extension payload for current extension snapshotter. -func WriteExtensionPayload(protoWriter protoio.Writer, payload []byte) error { - return protoWriter.WriteMsg(&SnapshotItem{ - Item: &SnapshotItem_ExtensionPayload{ - ExtensionPayload: &SnapshotExtensionPayload{ - Payload: payload, - }, - }, - }) -} - -// Uint64ToBigEndian - marshals uint64 to a big endian byte slice so it can be sorted -func Uint64ToBigEndian(i uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, i) - return b -} - -// BigEndianToUint64 returns an uint64 from big endian encoded bytes. If encoding -// is empty, zero is returned. -func BigEndianToUint64(bz []byte) uint64 { - if len(bz) == 0 { - return 0 - } - - return binary.BigEndian.Uint64(bz) -} diff --git a/store/v2/sonar-project.properties b/store/v2/sonar-project.properties deleted file mode 100644 index 008f93fc9f..0000000000 --- a/store/v2/sonar-project.properties +++ /dev/null @@ -1,16 +0,0 @@ -sonar.projectKey=cosmos-sdk-store -sonar.organization=cosmos - -sonar.projectName=Cosmos SDK - Store -sonar.project.monorepo.enabled=true - -sonar.sources=. -sonar.exclusions=**/*_test.go,**/*.pb.go,**/*.pulsar.go,**/*.pb.gw.go -sonar.coverage.exclusions=**/*_test.go,**/testutil/**,**/*.pb.go,**/*.pb.gw.go,**/*.pulsar.go,test_helpers.go,docs/** -sonar.tests=. -sonar.test.inclusions=**/*_test.go -sonar.go.coverage.reportPaths=coverage.out - -sonar.sourceEncoding=UTF-8 -sonar.scm.provider=git -sonar.scm.forceReloadAll=true diff --git a/store/v2/storage/README.md b/store/v2/storage/README.md deleted file mode 100644 index 48606d6c19..0000000000 --- a/store/v2/storage/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# State Storage (SS) - -The `storage` package contains the state storage (SS) implementation. Specifically, -it contains RocksDB, PebbleDB, and SQLite (Btree) backend implementations of the -`VersionedDatabase` interface. - -The goal of SS is to provide a modular storage backend, i.e. multiple implementations, -to facilitate storing versioned raw key/value pairs in a fast embedded database, -although an embedded database is not required, i.e. you could use a replicated -RDBMS system. - -The responsibility and functions of SS include the following: - -* Provide fast and efficient queries for versioned raw key/value pairs -* Provide versioned CRUD operations -* Provide versioned batching functionality -* Provide versioned iteration (forward and reverse) functionality -* Provide pruning functionality - -All of the functionality provided by an SS backend should work under a versioned -scheme, i.e. a user should be able to get, store, and iterate over keys for the -latest and historical versions efficiently. - -## Backends - -### RocksDB - -The RocksDB implementation is a CGO-based SS implementation. It fully supports -the `VersionedDatabase` API and is arguably the most efficient implementation. 
It -also supports versioning out-of-the-box using User-defined Timestamps in -ColumnFamilies (CF). However, it requires the CGO dependency which can complicate -an app’s build process. - -### PebbleDB - -The PebbleDB implementation is a native Go SS implementation that is primarily an -alternative to RocksDB. Since it does not support CF, we -need to implement versioning (MVCC) ourselves. This comes with added implementation -complexity and potential performance overhead. However, it is a pure Go implementation -and does not require CGO. - -### SQLite (Btree) - -The SQLite implementation is another CGO-based SS implementation. It fully supports -the `VersionedDatabase` API. The implementation is relatively straightforward and -easy to understand as it’s entirely SQL-based. However, benchmarks show that this -option is the least performant, even for reads. This SS backend has a lot of promise, -but needs more benchmarking and potential SQL optimizations, like dedicated tables -for certain aspects of state, e.g. latest state, to be extremely performant. - -## Benchmarks - -Benchmarks for basic operations on all supported native SS implementations can -be found in `store/storage/storage_bench_test.go`. - -At the time of writing, the following benchmarks were performed: - -```shell -name time/op -Get/backend_rocksdb_versiondb_opts-10 7.41µs ± 0% -Get/backend_pebbledb_default_opts-10 6.17µs ± 0% -Get/backend_btree_sqlite-10 29.1µs ± 0% -ApplyChangeset/backend_pebbledb_default_opts-10 5.73ms ± 0% -ApplyChangeset/backend_btree_sqlite-10 56.9ms ± 0% -ApplyChangeset/backend_rocksdb_versiondb_opts-10 4.07ms ± 0% -Iterate/backend_pebbledb_default_opts-10 1.04s ± 0% -Iterate/backend_btree_sqlite-10 1.59s ± 0% -Iterate/backend_rocksdb_versiondb_opts-10 778ms ± 0% -``` - -## Pruning - -Pruning is the responsibility of the underlying SS backend, and its implementation is backend-specific. -Specifically, the `StorageStore` accepts `store.PruningOption` which defines the -pruning configuration. During `ApplyChangeset`, the `StorageStore` will check if -pruning should occur based on the current height being committed. If so, it will -delegate a `Prune` call to the underlying SS backend, whose behavior can be specific -to the implementation, e.g. asynchronous or synchronous. - - -## State Sync - -State storage (SS) does not have a direct notion of state sync. Rather, `snapshots.Manager` -is responsible for creating and restoring snapshots of the entire state. The -`snapshots.Manager` has a `StorageSnapshotter` field which is fulfilled by the -`StorageStore` type; specifically, it implements the `Restore` method. The `Restore` -method reads off of a provided channel and writes key/value pairs directly to a -batch object which is committed to the underlying SS engine. - -## Non-Consensus Data - - - -## Usage - -An SS backend is meant to be used within a broader store implementation, as it -only stores data for direct and historical query purposes. We define a `Database` -interface in the `storage` package which is meant to represent a `VersionedDatabase` -with only the necessary methods. The `StorageStore` interface is meant to wrap or -accept this `Database` type, e.g. RocksDB. - -The `StorageStore` interface is an abstraction or wrapper around the backing SS -engine and can be seen as the main entry point to using SS. - -Higher up the stack, there should exist a `root.Store` implementation. The `root.Store` -is meant to encapsulate both an SS backend and an SC backend.
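To make that layering concrete, here is a minimal wiring sketch added for illustration. It uses the `pebbledb.New` and `storage.NewStorageStore` constructors that appear later in this diff (and which this PR removes); the data directory and the nop logger are illustrative choices, not requirements.

```go
package main

import (
	coretesting "cosmossdk.io/core/testing"
	"cosmossdk.io/store/v2/storage"
	"cosmossdk.io/store/v2/storage/pebbledb"
)

func main() {
	// Initialize the SS engine of choice (PebbleDB here)...
	db, err := pebbledb.New("/tmp/ss-demo")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// ...then wrap it in a StorageStore. This is the value that would be
	// handed to a root.Store constructor as its SS backend.
	ss := storage.NewStorageStore(db, coretesting.NewNopLogger())
	_ = ss
}
```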
The SS backend is -defined by this `StorageStore` implementation. - -In short, initialize your SS engine of choice and then provide that to `NewStorageStore` -which will further be provided to `root.Store` as the SS backend. diff --git a/store/v2/storage/database.go b/store/v2/storage/database.go deleted file mode 100644 index fba5998395..0000000000 --- a/store/v2/storage/database.go +++ /dev/null @@ -1,26 +0,0 @@ -package storage - -import ( - "io" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -// Database is an interface that wraps the storage database methods. A wrapper -// is useful for instances where you want to perform logic that is identical for all SS -// backends, such as restoring snapshots. -type Database interface { - NewBatch(version uint64) (store.Batch, error) - Has(storeKey []byte, version uint64, key []byte) (bool, error) - Get(storeKey []byte, version uint64, key []byte) ([]byte, error) - GetLatestVersion() (uint64, error) - SetLatestVersion(version uint64) error - - Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) - - Prune(version uint64) error - - io.Closer -} diff --git a/store/v2/storage/pebbledb/batch.go b/store/v2/storage/pebbledb/batch.go deleted file mode 100644 index 101986878c..0000000000 --- a/store/v2/storage/pebbledb/batch.go +++ /dev/null @@ -1,74 +0,0 @@ -package pebbledb - -import ( - "encoding/binary" - "errors" - "fmt" - - "github.com/cockroachdb/pebble" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type Batch struct { - storage *pebble.DB - batch *pebble.Batch - version uint64 - sync bool -} - -func NewBatch(storage *pebble.DB, version uint64, sync bool) (*Batch, error) { - var versionBz [VersionSize]byte - binary.LittleEndian.PutUint64(versionBz[:], version) - - batch := storage.NewBatch() - - if err := batch.Set([]byte(latestVersionKey), versionBz[:], nil); err != nil { - return nil, fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - - return &Batch{ - storage: storage, - batch: batch, - version: version, - sync: sync, - }, nil -} - -func (b *Batch) Size() int { - return b.batch.Len() -} - -func (b *Batch) Reset() error { - b.batch.Reset() - return nil -} - -func (b *Batch) set(storeKey []byte, tombstone uint64, key, value []byte) error { - prefixedKey := MVCCEncode(prependStoreKey(storeKey, key), b.version) - prefixedVal := MVCCEncode(value, tombstone) - - if err := b.batch.Set(prefixedKey, prefixedVal, nil); err != nil { - return fmt.Errorf("failed to write PebbleDB batch: %w", err) - } - - return nil -} - -func (b *Batch) Set(storeKey, key, value []byte) error { - return b.set(storeKey, 0, key, value) -} - -func (b *Batch) Delete(storeKey, key []byte) error { - return b.set(storeKey, b.version, key, []byte(tombstoneVal)) -} - -func (b *Batch) Write() (err error) { - defer func() { - err = errors.Join(err, b.batch.Close()) - }() - - return b.batch.Commit(&pebble.WriteOptions{Sync: b.sync}) -} diff --git a/store/v2/storage/pebbledb/comparator.go b/store/v2/storage/pebbledb/comparator.go deleted file mode 100644 index 337ff7698d..0000000000 --- a/store/v2/storage/pebbledb/comparator.go +++ /dev/null @@ -1,242 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "fmt" - - "github.com/cockroachdb/pebble" -) - -// MVCCComparer returns a PebbleDB Comparer with encoding and decoding routines -// for MVCC control, used to compare and store 
versioned keys. -// -// Note: This Comparer implementation is largely based on PebbleDB's internal -// MVCC example, which can be found here: -// https://github.com/cockroachdb/pebble/blob/master/cmd/pebble/mvcc.go -var MVCCComparer = &pebble.Comparer{ - Name: "ss_pebbledb_comparator", - - Compare: MVCCKeyCompare, - - AbbreviatedKey: func(k []byte) uint64 { - key, _, ok := SplitMVCCKey(k) - if !ok { - return 0 - } - - return pebble.DefaultComparer.AbbreviatedKey(key) - }, - - Equal: func(a, b []byte) bool { - return MVCCKeyCompare(a, b) == 0 - }, - - Separator: func(dst, a, b []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - bKey, _, ok := SplitMVCCKey(b) - if !ok { - return append(dst, a...) - } - - // if the keys are the same just return a - if bytes.Equal(aKey, bKey) { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Separator - // implementation. - dst = pebble.DefaultComparer.Separator(dst, aKey, bKey) - - // Did we pick a separator different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The separator is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - ImmediateSuccessor: func(dst, a []byte) []byte { - // The key `a` is guaranteed to be a bare prefix: It's a key without a version - // — just a trailing 0-byte to signify the length of the version. For example - // the user key "foo" is encoded as: "foo\0". We need to encode the immediate - // successor to "foo", which in the natural byte ordering is "foo\0". Append - // a single additional zero, to encode the user key "foo\0" with a zero-length - // version. - return append(append(dst, a...), 0) - }, - - Successor: func(dst, a []byte) []byte { - aKey, _, ok := SplitMVCCKey(a) - if !ok { - return append(dst, a...) - } - - n := len(dst) - - // MVCC key comparison uses bytes.Compare on the roachpb.Key, which is the - // same semantics as pebble.DefaultComparer, so reuse the latter's Successor - // implementation. - dst = pebble.DefaultComparer.Successor(dst, aKey) - - // Did we pick a successor different than aKey? If we did not, we can't do - // better than a. - buf := dst[n:] - if bytes.Equal(aKey, buf) { - return append(dst[:n], a...) - } - - // The successor is > aKey, so we only need to add the timestamp sentinel. - return append(dst, 0) - }, - - FormatKey: func(k []byte) fmt.Formatter { - return mvccKeyFormatter{key: k} - }, - - Split: func(k []byte) int { - key, _, ok := SplitMVCCKey(k) - if !ok { - return len(k) - } - - // This matches the behavior of libroach/KeyPrefix. RocksDB requires that - // keys generated via a SliceTransform be comparable with normal encoded - // MVCC keys. Encoded MVCC keys have a suffix indicating the number of - // bytes of timestamp data. MVCC keys without a timestamp have a suffix of - // 0. We're careful in EncodeKey to make sure that the user-key always has - // a trailing 0. If there is no timestamp this falls out naturally. If - // there is a timestamp we prepend a 0 to the encoded timestamp data. 
- return len(key) + 1 -}, -} - -type mvccKeyFormatter struct { - key []byte -} - -func (f mvccKeyFormatter) Format(s fmt.State, verb rune) { - k, vBz, ok := SplitMVCCKey(f.key) - if ok { - v, _ := decodeUint64Ascending(vBz) - fmt.Fprintf(s, "%s/%d", k, v) - } else { - fmt.Fprintf(s, "%s", f.key) - } -} - -// SplitMVCCKey accepts an MVCC key and returns the "user" key, the MVCC version, -// and a boolean indicating if the provided key is an MVCC key. -// -// Note, internally, we must make a copy of the provided mvccKey argument, which -// typically comes from the Key() method as it's not safe. -func SplitMVCCKey(mvccKey []byte) (key, version []byte, ok bool) { - if len(mvccKey) == 0 { - return nil, nil, false - } - - mvccKeyCopy := bytes.Clone(mvccKey) - - n := len(mvccKeyCopy) - 1 - tsLen := int(mvccKeyCopy[n]) - if n < tsLen { - return nil, nil, false - } - - key = mvccKeyCopy[:n-tsLen] - if tsLen > 0 { - version = mvccKeyCopy[n-tsLen+1 : len(mvccKeyCopy)-1] - } - - return key, version, true -} - -// MVCCKeyCompare compares two MVCC keys. -func MVCCKeyCompare(a, b []byte) int { - aEnd := len(a) - 1 - bEnd := len(b) - 1 - if aEnd < 0 || bEnd < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // Compute the index of the separator between the key and the timestamp. - aSep := aEnd - int(a[aEnd]) - bSep := bEnd - int(b[bEnd]) - if aSep < 0 || bSep < 0 { - // This should never happen unless there is some sort of corruption of - // the keys. This is a little bizarre, but the behavior exactly matches - // engine/db.cc:DBComparator. - return bytes.Compare(a, b) - } - - // compare the "user key" part of the key - if c := bytes.Compare(a[:aSep], b[:bSep]); c != 0 { - return c - } - - // compare the timestamp part of the key - aTS := a[aSep:aEnd] - bTS := b[bSep:bEnd] - if len(aTS) == 0 { - if len(bTS) == 0 { - return 0 - } - return -1 - } else if len(bTS) == 0 { - return 1 - } - - return bytes.Compare(aTS, bTS) -} - -// MVCCEncode encodes a key and version into an MVCC format. -// The format is: <key>\x00[<version>]<#version-bytes> -// If the version is 0, only the key and a null byte are encoded. -func MVCCEncode(key []byte, version uint64) (dst []byte) { - dst = append(dst, key...) - dst = append(dst, 0) - - if version != 0 { - extra := byte(1 + 8) - dst = encodeUint64Ascending(dst, version) - dst = append(dst, extra) - } - - return dst -} - -// encodeUint64Ascending encodes the uint64 value using a big-endian 8 byte -// representation. The bytes are appended to the supplied buffer and -// the final buffer is returned. -func encodeUint64Ascending(dst []byte, v uint64) []byte { - return append( - dst, - byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), - byte(v>>24), byte(v>>16), byte(v>>8), byte(v), - ) -} - -// decodeUint64Ascending decodes a uint64 from the input buffer, treating -// the input as a big-endian 8 byte uint64 representation. The decoded uint64 is -// returned.
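As a worked example of the encoding just described (an editor's addition: a hypothetical in-package snippet, since `decodeUint64Ascending` is unexported):

```go
package pebbledb

import "fmt"

// exampleEncoding shows the wire layout produced by MVCCEncode and recovered
// by SplitMVCCKey. The function is illustrative and not part of the package.
func exampleEncoding() {
	k := MVCCEncode([]byte("foo"), 2)
	fmt.Printf("%x\n", k)
	// Prints 666f6f00000000000000000209:
	//   666f6f           -> "foo" (the user key)
	//   00               -> null separator
	//   0000000000000002 -> 8-byte big-endian version
	//   09               -> version-length byte (8 payload bytes + 1 for itself)

	key, verBz, _ := SplitMVCCKey(k)
	version, _ := decodeUint64Ascending(verBz)
	fmt.Printf("%s@%d\n", key, version) // Prints foo@2
}
```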
-func decodeUint64Ascending(b []byte) (uint64, error) { - if len(b) < 8 { - return 0, fmt.Errorf("insufficient bytes to decode uint64 int value; expected 8; got %d", len(b)) - } - - v := binary.BigEndian.Uint64(b) - return v, nil -} diff --git a/store/v2/storage/pebbledb/comparator_test.go b/store/v2/storage/pebbledb/comparator_test.go deleted file mode 100644 index 1affd81b40..0000000000 --- a/store/v2/storage/pebbledb/comparator_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMVCCKey(t *testing.T) { - for i := uint64(1); i < 1001; i++ { - keyA := MVCCEncode([]byte("key001"), i) - - key, vBz, ok := SplitMVCCKey(keyA) - - version, err := decodeUint64Ascending(vBz) - require.NoError(t, err) - require.True(t, ok) - require.Equal(t, i, version) - require.Equal(t, []byte("key001"), key) - } -} - -func TestMVCCKeyCompare(t *testing.T) { - testCases := []struct { - keyA []byte - keyB []byte - expected int - }{ - { - // same key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 0, - }, - { - // same key, different version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key001"), 2), - expected: -1, - }, - { - // same key, different version (inverse) - keyA: MVCCEncode([]byte("key001"), 2), - keyB: MVCCEncode([]byte("key001"), 1), - expected: 1, - }, - { - // different key, same version - keyA: MVCCEncode([]byte("key001"), 1), - keyB: MVCCEncode([]byte("key009"), 1), - expected: -1, - }, - } - - for _, tc := range testCases { - require.Equalf(t, tc.expected, MVCCKeyCompare(tc.keyA, tc.keyB), "keyA: %s, keyB: %s", tc.keyA, tc.keyB) - } -} diff --git a/store/v2/storage/pebbledb/db.go b/store/v2/storage/pebbledb/db.go deleted file mode 100644 index 4e9df737b5..0000000000 --- a/store/v2/storage/pebbledb/db.go +++ /dev/null @@ -1,399 +0,0 @@ -package pebbledb - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" -) - -const ( - VersionSize = 8 - // PruneCommitBatchSize defines the size, in number of key/value pairs, to prune - // in a single batch. - PruneCommitBatchSize = 50 - - StorePrefixTpl = "s/k:%s/" // s/k:<storeKey> - latestVersionKey = "s/_latest" // NB: latestVersionKey key must be lexically smaller than StorePrefixTpl - pruneHeightKey = "s/_prune_height" // NB: pruneHeightKey key must be lexically smaller than StorePrefixTpl - tombstoneVal = "TOMBSTONE" -) - -var _ storage.Database = (*Database)(nil) - -type Database struct { - storage *pebble.DB - - // earliestVersion defines the earliest version set in the database, which is - // only updated when the database is pruned. - earliestVersion uint64 - - // Sync is whether to sync writes through the OS buffer cache and down onto - // the actual disk, if applicable. Setting Sync is required for durability of - // individual write operations but can result in slower writes. - // - // If false, and the process or machine crashes, then a recent write may be - // lost. This is due to the recently written data being buffered inside the - // process running Pebble. This differs from the semantics of a write system - // call in which the data is buffered in the OS buffer cache and would thus - // survive a process crash.
- sync bool -} - -func New(dataDir string) (*Database, error) { - opts := &pebble.Options{ - Comparer: MVCCComparer, - } - opts = opts.EnsureDefaults() - - db, err := pebble.Open(dataDir, opts) - if err != nil { - return nil, fmt.Errorf("failed to open PebbleDB: %w", err) - } - - pruneHeight, err := getPruneHeight(db) - if err != nil { - return nil, fmt.Errorf("failed to get prune height: %w", err) - } - - return &Database{ - storage: db, - earliestVersion: pruneHeight + 1, - sync: true, - }, nil -} - -func NewWithDB(storage *pebble.DB, sync bool) *Database { - pruneHeight, err := getPruneHeight(storage) - if err != nil { - panic(fmt.Errorf("failed to get prune height: %w", err)) - } - - return &Database{ - storage: storage, - earliestVersion: pruneHeight + 1, - sync: sync, - } -} - -func (db *Database) SetSync(sync bool) { - db.sync = sync -} - -func (db *Database) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - b, err := NewBatch(db.storage, version, db.sync) - if err != nil { - return nil, err - } - - return b, nil -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Set([]byte(latestVersionKey), ts[:], &pebble.WriteOptions{Sync: db.sync}) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, closer, err := db.storage.Get([]byte(latestVersionKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in case of a fresh database - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz), closer.Close() -} - -func (db *Database) setPruneHeight(pruneVersion uint64) error { - db.earliestVersion = pruneVersion + 1 - - var ts [VersionSize]byte - binary.LittleEndian.PutUint64(ts[:], pruneVersion) - - return db.storage.Set([]byte(pruneHeightKey), ts[:], &pebble.WriteOptions{Sync: db.sync}) -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := db.Get(storeKey, version, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) { - if targetVersion < db.earliestVersion { - return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion} - } - - prefixedVal, err := getMVCCSlice(db.storage, storeKey, key, targetVersion) - if err != nil { - if errors.Is(err, storeerrors.ErrRecordNotFound) { - return nil, nil - } - - return nil, fmt.Errorf("failed to perform PebbleDB read: %w", err) - } - - valBz, tombBz, ok := SplitMVCCKey(prefixedVal) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC value: %s", prefixedVal) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. - if len(tombBz) == 0 { - return valBz, nil - } - - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - return nil, fmt.Errorf("failed to decode value tombstone: %w", err) - } - - // A tombstone of zero or a target version that is less than the tombstone - // version means the key is not deleted at the target version. - if targetVersion < tombstone { - return valBz, nil - } - - // the value is considered deleted - return nil, nil -} - -// Prune removes all versions of all keys that are <= the given version. 
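The `Get` method above implements a two-step read rule: resolve the newest version of the key that is <= the target version, then honor a tombstone only if it is also <= the target version. A self-contained sketch of just that rule, added for illustration (the slice of entries stands in for the PebbleDB iterator seek; all names are illustrative):

```go
package main

import "fmt"

type entry struct {
	version   uint64
	value     string
	tombstone uint64 // 0 means the entry was never deleted
}

// get resolves a read at the target version: the newest version <= target
// wins, and a tombstone <= target marks the key as deleted.
func get(history []entry, target uint64) (string, bool) {
	var best *entry
	for i := range history {
		e := &history[i]
		if e.version <= target && (best == nil || e.version > best.version) {
			best = e
		}
	}
	if best == nil {
		return "", false
	}
	if best.tombstone != 0 && best.tombstone <= target {
		return "", false // deleted at or before the target version
	}
	return best.value, true
}

func main() {
	history := []entry{
		{version: 3, value: "a"},
		{version: 7, value: "b", tombstone: 9},
	}
	fmt.Println(get(history, 5))  // a true
	fmt.Println(get(history, 8))  // b true
	fmt.Println(get(history, 10)) // "" false: tombstoned at version 9
}
```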
-// -// Note, the implementation of this method is inefficient and can be potentially -// time consuming given the size of the database and when the last pruning occurred -// (if any). This is because the implementation iterates over all keys in the -// database in order to delete them. -// -// See: https://github.com/cockroachdb/cockroach/blob/33623e3ee420174a4fd3226d1284b03f0e3caaac/pkg/storage/mvcc.go#L3182 -func (db *Database) Prune(version uint64) error { - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: []byte("s/k:")}) - if err != nil { - return err - } - defer itr.Close() - - batch := db.storage.NewBatch() - defer batch.Close() - - var ( - batchCounter int - prevKey, prevKeyPrefixed, prevPrefixedVal []byte - prevKeyVersion uint64 - ) - - for itr.First(); itr.Valid(); { - prefixedKey := slices.Clone(itr.Key()) - - keyBz, verBz, ok := SplitMVCCKey(prefixedKey) - if !ok { - return fmt.Errorf("invalid PebbleDB MVCC key: %s", prefixedKey) - } - - keyVersion, err := decodeUint64Ascending(verBz) - if err != nil { - return fmt.Errorf("failed to decode key version: %w", err) - } - - // seek to next key if we are at a version which is higher than prune height - if keyVersion > version { - itr.NextPrefix() - continue - } - - // Delete a key if another entry for that key exists a larger version than - // the original but <= to the prune height. We also delete a key if it has - // been tombstoned and its version is <= to the prune height. - if prevKeyVersion <= version && (bytes.Equal(prevKey, keyBz) || valTombstoned(prevPrefixedVal)) { - if err := batch.Delete(prevKeyPrefixed, nil); err != nil { - return err - } - - batchCounter++ - if batchCounter >= PruneCommitBatchSize { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - - batchCounter = 0 - batch.Reset() - } - } - - prevKey = keyBz - prevKeyVersion = keyVersion - prevKeyPrefixed = prefixedKey - prevPrefixedVal = slices.Clone(itr.Value()) - - itr.Next() - } - - // commit any leftover delete ops in batch - if batchCounter > 0 { - if err := batch.Commit(&pebble.WriteOptions{Sync: db.sync}); err != nil { - return err - } - } - - return db.setPruneHeight(version) -} - -func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, false), nil -} - -func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, storeerrors.ErrKeyEmpty - } - - if start != nil && end != nil && bytes.Compare(start, end) > 0 { - return nil, storeerrors.ErrStartAfterEnd - } - - lowerBound := MVCCEncode(prependStoreKey(storeKey, start), 0) - - var upperBound []byte - if end != nil { - upperBound = MVCCEncode(prependStoreKey(storeKey, end), 0) - } - - itr, err := 
db.storage.NewIter(&pebble.IterOptions{LowerBound: lowerBound, UpperBound: upperBound}) - if err != nil { - return nil, err - } - - return newPebbleDBIterator(itr, storePrefix(storeKey), start, end, version, db.earliestVersion, true), nil -} - -func storePrefix(storeKey []byte) []byte { - return append([]byte(StorePrefixTpl), storeKey...) -} - -func prependStoreKey(storeKey, key []byte) []byte { - return append(storePrefix(storeKey), key...) -} - -func getPruneHeight(storage *pebble.DB) (uint64, error) { - bz, closer, err := storage.Get([]byte(pruneHeightKey)) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - // in cases where pruning was never triggered - return 0, nil - } - - return 0, err - } - - if len(bz) == 0 { - return 0, closer.Close() - } - - return binary.LittleEndian.Uint64(bz), closer.Close() -} - -func valTombstoned(value []byte) bool { - if value == nil { - return false - } - - _, tombBz, ok := SplitMVCCKey(value) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", value)) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - return true -} - -func getMVCCSlice(db *pebble.DB, storeKey, key []byte, version uint64) ([]byte, error) { - // end domain is exclusive, so we need to increment the version by 1 - if version < math.MaxUint64 { - version++ - } - - itr, err := db.NewIter(&pebble.IterOptions{ - LowerBound: MVCCEncode(prependStoreKey(storeKey, key), 0), - UpperBound: MVCCEncode(prependStoreKey(storeKey, key), version), - }) - if err != nil { - return nil, err - } - - defer itr.Close() - - if !itr.Last() { - return nil, storeerrors.ErrRecordNotFound - } - - _, vBz, ok := SplitMVCCKey(itr.Key()) - if !ok { - return nil, fmt.Errorf("invalid PebbleDB MVCC key: %s", itr.Key()) - } - - keyVersion, err := decodeUint64Ascending(vBz) - if err != nil { - return nil, fmt.Errorf("failed to decode key version: %w", err) - } - if keyVersion > version { - return nil, fmt.Errorf("key version too large: %d", keyVersion) - } - - return slices.Clone(itr.Value()), nil -} diff --git a/store/v2/storage/pebbledb/db_test.go b/store/v2/storage/pebbledb/db_test.go deleted file mode 100644 index 0ef4c8ca9f..0000000000 --- a/store/v2/storage/pebbledb/db_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package pebbledb - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - if err == nil && db != nil { - // We set sync=false just to speed up CI tests. Operators should take - // careful consideration when setting this value in production environments. 
- db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - } - - suite.Run(t, s) -} diff --git a/store/v2/storage/pebbledb/iterator.go b/store/v2/storage/pebbledb/iterator.go deleted file mode 100644 index 6b16805e9d..0000000000 --- a/store/v2/storage/pebbledb/iterator.go +++ /dev/null @@ -1,426 +0,0 @@ -package pebbledb - -import ( - "bytes" - "fmt" - "slices" - - "github.com/cockroachdb/pebble" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -// iterator implements the store.Iterator interface. It wraps a PebbleDB iterator -// with added MVCC key handling logic. The iterator will iterate over the key space -// in the provided domain for a given version. If a key has been written at the -// provided version, that key/value pair will be iterated over. Otherwise, the -// latest version for that key/value pair will be iterated over s.t. it's less -// than the provided version. -type iterator struct { - source *pebble.Iterator - prefix, start, end []byte - version uint64 - valid bool - reverse bool -} - -func newPebbleDBIterator(src *pebble.Iterator, prefix, mvccStart, mvccEnd []byte, version, earliestVersion uint64, reverse bool) *iterator { - if version < earliestVersion { - return &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: false, - reverse: reverse, - } - } - - // move the underlying PebbleDB iterator to the first key - var valid bool - if reverse { - valid = src.Last() - } else { - valid = src.First() - } - - itr := &iterator{ - source: src, - prefix: prefix, - start: mvccStart, - end: mvccEnd, - version: version, - valid: valid, - reverse: reverse, - } - - if valid { - currKey, currKeyVersion, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - curKeyVersionDecoded, err := decodeUint64Ascending(currKeyVersion) - if err != nil { - itr.valid = false - return itr - } - - // We need to check whether initial key iterator visits has a version <= requested - // version. If larger version, call next to find another key which does. - if curKeyVersionDecoded > itr.version { - itr.Next() - } else { - // If version is less, seek to the largest version of that key <= requested - // iterator version. It is guaranteed this won't move the iterator to a key - // that is invalid since curKeyVersionDecoded <= requested iterator version, - // so there exists at least one version of currKey SeekLT may move to. - itr.valid = itr.source.SeekLT(MVCCEncode(currKey, itr.version+1)) - } - } - return itr -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - - key, _, ok := SplitMVCCKey(itr.source.Key()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC key. - panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key())) - } - - keyCopy := slices.Clone(key) - return keyCopy[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - - val, _, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. 
- panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - return slices.Clone(val) -} - -func (itr *iterator) Next() { - if itr.reverse { - itr.nextReverse() - } else { - itr.nextForward() - } -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if !itr.valid || !itr.source.Valid() { - itr.valid = false - return itr.valid - } - - // if source has error, consider it invalid - if err := itr.source.Error(); err != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *iterator) Error() error { - return itr.source.Error() -} - -func (itr *iterator) Close() error { - err := itr.source.Close() - itr.source = nil - itr.valid = false - - return err -} - -func (itr *iterator) assertIsValid() { - if !itr.valid { - panic("iterator is invalid") - } -} - -// cursorTombstoned checks if the current cursor is pointing at a key/value pair -// that is tombstoned. If the cursor is tombstoned, <true> is returned, otherwise -// <false> is returned. In the case where the iterator is valid but the key/value -// pair is tombstoned, the caller should call Next(). Note, this method assumes -// the caller assures the iterator is valid first! -func (itr *iterator) cursorTombstoned() bool { - _, tombBz, ok := SplitMVCCKey(itr.source.Value()) - if !ok { - // XXX: This should not happen as that would indicate we have a malformed - // MVCC value. - panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Key())) - } - - // If the tombstone suffix is empty, we consider this a zero value and thus it - // is not tombstoned. - if len(tombBz) == 0 { - return false - } - - // If the tombstone suffix is non-empty and greater than the target version, - // the value is not tombstoned. - tombstone, err := decodeUint64Ascending(tombBz) - if err != nil { - panic(fmt.Errorf("failed to decode value tombstone: %w", err)) - } - if tombstone > itr.version { - return false - } - - return true -} - -func (itr *iterator) DebugRawIterate() { - valid := itr.source.Valid() - if valid { - // The first key may not represent the desired target version, so move the - // cursor to the correct location.
-		firstKey, _, _ := SplitMVCCKey(itr.source.Key())
-		valid = itr.source.SeekLT(MVCCEncode(firstKey, itr.version+1))
-	}
-
-	for valid {
-		key, vBz, ok := SplitMVCCKey(itr.source.Key())
-		if !ok {
-			panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
-		}
-
-		version, err := decodeUint64Ascending(vBz)
-		if err != nil {
-			panic(fmt.Errorf("failed to decode key version: %w", err))
-		}
-
-		val, tombBz, ok := SplitMVCCKey(itr.source.Value())
-		if !ok {
-			panic(fmt.Sprintf("invalid PebbleDB MVCC value: %s", itr.source.Value()))
-		}
-
-		var tombstone uint64
-		if len(tombBz) > 0 {
-			tombstone, err = decodeUint64Ascending(tombBz)
-			if err != nil {
-				panic(fmt.Errorf("failed to decode value tombstone: %w", err))
-			}
-		}
-
-		fmt.Printf("KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", key, val, version, tombstone)
-
-		var next bool
-		if itr.reverse {
-			next = itr.source.SeekLT(MVCCEncode(key, 0))
-		} else {
-			next = itr.source.NextPrefix()
-		}
-
-		if next {
-			nextKey, _, ok := SplitMVCCKey(itr.source.Key())
-			if !ok {
-				panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
-			}
-
-			// the next key must have itr.prefix as the prefix
-			if !bytes.HasPrefix(nextKey, itr.prefix) {
-				valid = false
-			} else {
-				valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))
-			}
-		} else {
-			valid = false
-		}
-	}
-}
-
-func (itr *iterator) nextForward() {
-	if !itr.source.Valid() {
-		itr.valid = false
-		return
-	}
-
-	currKey, _, ok := SplitMVCCKey(itr.source.Key())
-	if !ok {
-		// XXX: This should not happen as that would indicate we have a malformed
-		// MVCC key.
-		panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
-	}
-
-	next := itr.source.NextPrefix()
-
-	// First move the iterator to the next prefix, which may not correspond to the
-	// desired version for that key, e.g. if the key was written at a later version,
-	// so we seek back to the latest desired version, s.t. the version is <= itr.version.
-	if next {
-		nextKey, _, ok := SplitMVCCKey(itr.source.Key())
-		if !ok {
-			// XXX: This should not happen as that would indicate we have a malformed
-			// MVCC key.
-			itr.valid = false
-			return
-		}
-
-		if !bytes.HasPrefix(nextKey, itr.prefix) {
-			// the next key must have itr.prefix as the prefix
-			itr.valid = false
-			return
-		}
-
-		// Move the iterator to the closest version to the desired version, so we
-		// append the current iterator key to the prefix and seek to that key.
-		itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))
-
-		tmpKey, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key())
-		if !ok {
-			// XXX: This should not happen as that would indicate we have a malformed
-			// MVCC key.
-			itr.valid = false
-			return
-		}
-
-		// There exist cases where the SeekLT() call moved us back to the same key
-		// we started at, so we must move to the next key, i.e. two keys forward.
-		if bytes.Equal(tmpKey, currKey) {
-			if itr.source.NextPrefix() {
-				itr.nextForward()
-
-				_, tmpKeyVersion, ok = SplitMVCCKey(itr.source.Key())
-				if !ok {
-					// XXX: This should not happen as that would indicate we have a malformed
-					// MVCC key.
-					itr.valid = false
-					return
-				}
-
-			} else {
-				itr.valid = false
-				return
-			}
-		}
-
-		// We need to verify that every Next call either moves the iterator to a key
-		// whose version is less than or equal to requested iterator version, or
-		// exhausts the iterator.
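-		// For example (illustrative only): with entries sorted as (a,2), (a,5),
-		// (b,4), (b,9) and itr.version = 4, NextPrefix moves from "a" to the first
-		// "b" entry and the SeekLT above lands on (b,4), the newest "b" version <= 4.
-		// If every "b" version were > 4, SeekLT would fall back onto an "a" entry,
-		// which the same-key check above handles.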
-		tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion)
-		if err != nil {
-			itr.valid = false
-			return
-		}
-
-		// If the iterator is at an entry whose version is higher than the requested
-		// version, call nextForward again.
-		if tmpKeyVersionDecoded > itr.version {
-			itr.nextForward()
-		}
-
-		// The cursor might now be pointing at a key/value pair that is tombstoned.
-		// If so, we must move the cursor.
-		if itr.valid && itr.cursorTombstoned() {
-			itr.nextForward()
-		}
-
-		return
-	}
-
-	itr.valid = false
-}
-
-func (itr *iterator) nextReverse() {
-	if !itr.source.Valid() {
-		itr.valid = false
-		return
-	}
-
-	currKey, _, ok := SplitMVCCKey(itr.source.Key())
-	if !ok {
-		// XXX: This should not happen as that would indicate we have a malformed
-		// MVCC key.
-		panic(fmt.Sprintf("invalid PebbleDB MVCC key: %s", itr.source.Key()))
-	}
-
-	next := itr.source.SeekLT(MVCCEncode(currKey, 0))
-
-	// First move the iterator to the previous prefix, which may not correspond to
-	// the desired version for that key, e.g. if the key was written at a later
-	// version, so we seek back to the latest desired version, s.t. the version is
-	// <= itr.version.
-	if next {
-		nextKey, _, ok := SplitMVCCKey(itr.source.Key())
-		if !ok {
-			// XXX: This should not happen as that would indicate we have a malformed
-			// MVCC key.
-			itr.valid = false
-			return
-		}
-
-		if !bytes.HasPrefix(nextKey, itr.prefix) {
-			// the next key must have itr.prefix as the prefix
-			itr.valid = false
-			return
-		}
-
-		// Move the iterator to the closest version to the desired version, so we
-		// append the current iterator key to the prefix and seek to that key.
-		itr.valid = itr.source.SeekLT(MVCCEncode(nextKey, itr.version+1))
-
-		_, tmpKeyVersion, ok := SplitMVCCKey(itr.source.Key())
-		if !ok {
-			// XXX: This should not happen as that would indicate we have a malformed
-			// MVCC key.
-			itr.valid = false
-			return
-		}
-
-		// We need to verify that every Next call either moves the iterator to a key
-		// whose version is less than or equal to requested iterator version, or
-		// exhausts the iterator.
-		tmpKeyVersionDecoded, err := decodeUint64Ascending(tmpKeyVersion)
-		if err != nil {
-			itr.valid = false
-			return
-		}
-
-		// If the iterator is at an entry whose version is higher than the requested
-		// version, call nextReverse again.
-		if tmpKeyVersionDecoded > itr.version {
-			itr.nextReverse()
-		}
-
-		// The cursor might now be pointing at a key/value pair that is tombstoned.
-		// If so, we must move the cursor.
-		if itr.valid && itr.cursorTombstoned() {
-			itr.nextReverse()
-		}
-
-		return
-	}
-
-	itr.valid = false
-}
diff --git a/store/v2/storage/rocksdb/batch.go b/store/v2/storage/rocksdb/batch.go
deleted file mode 100644
index 826b81778a..0000000000
--- a/store/v2/storage/rocksdb/batch.go
+++ /dev/null
@@ -1,67 +0,0 @@
-//go:build rocksdb
-// +build rocksdb
-
-package rocksdb
-
-import (
-	"encoding/binary"
-
-	"github.com/linxGnu/grocksdb"
-
-	"cosmossdk.io/store/v2"
-)
-
-var _ store.Batch = (*Batch)(nil)
-
-type Batch struct {
-	version  uint64
-	ts       [TimestampSize]byte
-	storage  *grocksdb.DB
-	cfHandle *grocksdb.ColumnFamilyHandle
-	batch    *grocksdb.WriteBatch
-}
-
-// NewBatch creates a new versioned batch used for batch writes. The caller
-// must call Write() on the returned batch to commit the changes; Write also
-// destroys the underlying write batch when done.
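-//
-// A minimal usage sketch (illustrative only, assuming a *Database opened via
-// New or NewWithDB):
-//
-//	batch := NewBatch(db, 5)
-//	_ = batch.Set([]byte("store1"), []byte("key"), []byte("value"))
-//	if err := batch.Write(); err != nil {
-//		// handle error
-//	}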
-func NewBatch(db *Database, version uint64) Batch {
-	var ts [TimestampSize]byte
-	binary.LittleEndian.PutUint64(ts[:], version)
-
-	batch := grocksdb.NewWriteBatch()
-	batch.Put([]byte(latestVersionKey), ts[:])
-
-	return Batch{
-		version:  version,
-		ts:       ts,
-		storage:  db.storage,
-		cfHandle: db.cfHandle,
-		batch:    batch,
-	}
-}
-
-func (b Batch) Size() int {
-	return len(b.batch.Data())
-}
-
-func (b Batch) Reset() error {
-	b.batch.Clear()
-	return nil
-}
-
-func (b Batch) Set(storeKey, key, value []byte) error {
-	prefixedKey := prependStoreKey(storeKey, key)
-	b.batch.PutCFWithTS(b.cfHandle, prefixedKey, b.ts[:], value)
-	return nil
-}
-
-func (b Batch) Delete(storeKey, key []byte) error {
-	prefixedKey := prependStoreKey(storeKey, key)
-	b.batch.DeleteCFWithTS(b.cfHandle, prefixedKey, b.ts[:])
-	return nil
-}
-
-func (b Batch) Write() error {
-	defer b.batch.Destroy()
-	return b.storage.Write(defaultWriteOpts, b.batch)
-}
diff --git a/store/v2/storage/rocksdb/comparator.go b/store/v2/storage/rocksdb/comparator.go
deleted file mode 100644
index 5da27d9121..0000000000
--- a/store/v2/storage/rocksdb/comparator.go
+++ /dev/null
@@ -1,76 +0,0 @@
-//go:build rocksdb
-// +build rocksdb
-
-package rocksdb
-
-import (
-	"bytes"
-	"encoding/binary"
-
-	"github.com/linxGnu/grocksdb"
-)
-
-// CreateTSComparator should behave identically to the RocksDB builtin timestamp
-// comparator. We also use the same builtin comparator name so the builtin tools
-// `ldb`/`sst_dump` can work with the database.
-func CreateTSComparator() *grocksdb.Comparator {
-	return grocksdb.NewComparatorWithTimestamp(
-		"leveldb.BytewiseComparator.u64ts",
-		TimestampSize,
-		compare,
-		compareTS,
-		compareWithoutTS,
-	)
-}
-
-// compareTS compares timestamps as little-endian encoded integers.
-//
-// NOTICE: The behavior must be identical to the RocksDB builtin comparator
-// "leveldb.BytewiseComparator.u64ts".
-func compareTS(bz1, bz2 []byte) int {
-	ts1 := binary.LittleEndian.Uint64(bz1)
-	ts2 := binary.LittleEndian.Uint64(bz2)
-
-	switch {
-	case ts1 < ts2:
-		return -1
-
-	case ts1 > ts2:
-		return 1
-
-	default:
-		return 0
-	}
-}
-
-// compare compares two internal keys with a timestamp suffix; a larger
-// timestamp comes first.
-//
-// NOTICE: The behavior must be identical to the RocksDB builtin comparator
-// "leveldb.BytewiseComparator.u64ts".
-func compare(a, b []byte) int {
-	ret := compareWithoutTS(a, true, b, true)
-	if ret != 0 {
-		return ret
-	}
-
-	// Compare timestamp. For the same user key with different timestamps, larger
-	// (newer) timestamp comes first, which means seek operation will try to find
-	// a version less than or equal to the target version.
-	return -compareTS(a[len(a)-TimestampSize:], b[len(b)-TimestampSize:])
-}
-
-// compareWithoutTS compares two internal keys without the timestamp part.
-//
-// NOTICE: The behavior must be identical to the RocksDB builtin comparator
-// "leveldb.BytewiseComparator.u64ts".
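-//
-// For example (illustrative only): if a is "key1" followed by an 8-byte
-// timestamp encoding 7, and b is "key1" followed by an 8-byte timestamp
-// encoding 3, then compareWithoutTS(a, true, b, true) returns 0, since only
-// the user-key portions, both "key1", are compared.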
-func compareWithoutTS(a []byte, aHasTS bool, b []byte, bHasTS bool) int { - if aHasTS { - a = a[:len(a)-TimestampSize] - } - if bHasTS { - b = b[:len(b)-TimestampSize] - } - - return bytes.Compare(a, b) -} diff --git a/store/v2/storage/rocksdb/db.go b/store/v2/storage/rocksdb/db.go deleted file mode 100644 index 480aeb2cff..0000000000 --- a/store/v2/storage/rocksdb/db.go +++ /dev/null @@ -1,235 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - "encoding/binary" - "fmt" - "slices" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/util" -) - -const ( - TimestampSize = 8 - - StorePrefixTpl = "s/k:%s/" - latestVersionKey = "s/latest" -) - -var ( - _ storage.Database = (*Database)(nil) - - defaultWriteOpts = grocksdb.NewDefaultWriteOptions() - defaultReadOpts = grocksdb.NewDefaultReadOptions() -) - -type Database struct { - storage *grocksdb.DB - cfHandle *grocksdb.ColumnFamilyHandle - - // tsLow reflects the full_history_ts_low CF value, which is earliest version - // supported - tsLow uint64 -} - -func New(dataDir string) (*Database, error) { - storage, cfHandle, err := OpenRocksDB(dataDir) - if err != nil { - return nil, fmt.Errorf("failed to open RocksDB: %w", err) - } - - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func NewWithDB(storage *grocksdb.DB, cfHandle *grocksdb.ColumnFamilyHandle) (*Database, error) { - slice, err := storage.GetFullHistoryTsLow(cfHandle) - if err != nil { - return nil, fmt.Errorf("failed to get full_history_ts_low: %w", err) - } - - var tsLow uint64 - tsLowBz := copyAndFreeSlice(slice) - if len(tsLowBz) > 0 { - tsLow = binary.LittleEndian.Uint64(tsLowBz) - } - - return &Database{ - storage: storage, - cfHandle: cfHandle, - tsLow: tsLow, - }, nil -} - -func (db *Database) Close() error { - db.storage.Close() - - db.storage = nil - db.cfHandle = nil - - return nil -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - return NewBatch(db, version), nil -} - -func (db *Database) getSlice(storeKey []byte, version uint64, key []byte) (*grocksdb.Slice, error) { - if version < db.tsLow { - return nil, errors.ErrVersionPruned{EarliestVersion: db.tsLow, RequestedVersion: version} - } - - return db.storage.GetCF( - newTSReadOptions(version), - db.cfHandle, - prependStoreKey(storeKey, key), - ) -} - -func (db *Database) SetLatestVersion(version uint64) error { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], version) - - return db.storage.Put(defaultWriteOpts, []byte(latestVersionKey), ts[:]) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - bz, err := db.storage.GetBytes(defaultReadOpts, []byte(latestVersionKey)) - if err != nil { - return 0, err - } - - if len(bz) == 0 { - // in case of a fresh database - return 0, nil - } - - return binary.LittleEndian.Uint64(bz), nil -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - slice, err := db.getSlice(storeKey, version, key) - if err != nil { - return false, err - } - - return slice.Exists(), nil -} - -func (db *Database) 
Get(storeKey []byte, version uint64, key []byte) ([]byte, error) {
-	slice, err := db.getSlice(storeKey, version, key)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get RocksDB slice: %w", err)
-	}
-
-	return copyAndFreeSlice(slice), nil
-}
-
-// Prune prunes all versions up to and including the provided version argument.
-// Internally, this performs a manual compaction; data with an older timestamp
-// will be GCed by the compaction.
-func (db *Database) Prune(version uint64) error {
-	tsLow := version + 1 // we increment by 1 to include the provided version
-
-	var ts [TimestampSize]byte
-	binary.LittleEndian.PutUint64(ts[:], tsLow)
-	compactOpts := grocksdb.NewCompactRangeOptions()
-	compactOpts.SetFullHistoryTsLow(ts[:])
-	db.storage.CompactRangeCFOpt(db.cfHandle, grocksdb.Range{}, compactOpts)
-
-	db.tsLow = tsLow
-	return nil
-}
-
-func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
-	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
-		return nil, errors.ErrKeyEmpty
-	}
-
-	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
-		return nil, errors.ErrStartAfterEnd
-	}
-
-	prefix := storePrefix(storeKey)
-	start, end = util.IterateWithPrefix(prefix, start, end)
-
-	itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle)
-	return newRocksDBIterator(itr, prefix, start, end, false), nil
-}
-
-func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
-	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
-		return nil, errors.ErrKeyEmpty
-	}
-
-	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
-		return nil, errors.ErrStartAfterEnd
-	}
-
-	prefix := storePrefix(storeKey)
-	start, end = util.IterateWithPrefix(prefix, start, end)
-
-	itr := db.storage.NewIteratorCF(newTSReadOptions(version), db.cfHandle)
-	return newRocksDBIterator(itr, prefix, start, end, true), nil
-}
-
-// newTSReadOptions returns ReadOptions used in the RocksDB column family read.
-func newTSReadOptions(version uint64) *grocksdb.ReadOptions {
-	var ts [TimestampSize]byte
-	binary.LittleEndian.PutUint64(ts[:], version)
-
-	readOpts := grocksdb.NewDefaultReadOptions()
-	readOpts.SetTimestamp(ts[:])
-
-	return readOpts
-}
-
-func storePrefix(storeKey []byte) []byte {
-	return append([]byte(StorePrefixTpl), storeKey...)
-}
-
-func prependStoreKey(storeKey, key []byte) []byte {
-	return append(storePrefix(storeKey), key...)
-}
-
-// copyAndFreeSlice will copy a given RocksDB slice and free it. If the slice
-// does not exist, nil will be returned.
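-//
-// A hedged usage sketch (illustrative only):
-//
-//	bz := copyAndFreeSlice(slice) // bz is nil when slice.Exists() is false
-//
-// After the call the caller owns bz; the slice itself must not be used again,
-// as it has been freed.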
-func copyAndFreeSlice(s *grocksdb.Slice) []byte { - defer s.Free() - if !s.Exists() { - return nil - } - - return slices.Clone(s.Data()) -} - -func readOnlySlice(s *grocksdb.Slice) []byte { - if !s.Exists() { - return nil - } - - return s.Data() -} diff --git a/store/v2/storage/rocksdb/db_test.go b/store/v2/storage/rocksdb/db_test.go deleted file mode 100644 index b807f07259..0000000000 --- a/store/v2/storage/rocksdb/db_test.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -var storeKey1 = []byte("store1") - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 12, - } - suite.Run(t, s) -} - -func TestDatabase_ReverseIterator(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - batch := NewBatch(db, 1) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - - // reverse iterator without an end key - iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) - require.NoError(t, err) - - defer iter.Close() - - i, count := 99, 0 - for ; iter.Valid(); iter.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) - - i-- - count++ - } - require.Equal(t, 100, count) - require.NoError(t, iter.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter.Valid()) - - // reverse iterator with a start and end domain - iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) - require.NoError(t, err) - - defer iter2.Close() - - i, count = 18, 0 - for ; iter2.Valid(); iter2.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) - - i-- - count++ - } - require.Equal(t, 9, count) - require.NoError(t, iter2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter2.Valid()) - - // start must be <= end - iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) - require.Error(t, err) - require.Nil(t, iter3) -} diff --git a/store/v2/storage/rocksdb/iterator.go b/store/v2/storage/rocksdb/iterator.go deleted file mode 100644 index 7427e90406..0000000000 --- a/store/v2/storage/rocksdb/iterator.go +++ /dev/null @@ -1,155 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "bytes" - - "github.com/linxGnu/grocksdb" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -type iterator struct { - source *grocksdb.Iterator - prefix, start, end []byte - reverse bool - invalid bool -} - -func newRocksDBIterator(source *grocksdb.Iterator, prefix, start, end []byte, reverse bool) *iterator { - if reverse { - if end == nil { - source.SeekToLast() - } else { - source.Seek(end) - - if source.Valid() { - eoaKey := 
readOnlySlice(source.Key()) // end or after key - if bytes.Compare(end, eoaKey) <= 0 { - source.Prev() - } - } else { - source.SeekToLast() - } - } - } else { - if start == nil { - source.SeekToFirst() - } else { - source.Seek(start) - } - } - - return &iterator{ - source: source, - prefix: prefix, - start: start, - end: end, - reverse: reverse, - invalid: !source.Valid(), - } -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - start := itr.start - if start != nil { - start = start[len(itr.prefix):] - if len(start) == 0 { - start = nil - } - } - - end := itr.end - if end != nil { - end = end[len(itr.prefix):] - if len(end) == 0 { - end = nil - } - } - - return start, end -} - -func (itr *iterator) Valid() bool { - // once invalid, forever invalid - if itr.invalid { - return false - } - - // if source has error, consider it invalid - if err := itr.source.Err(); err != nil { - itr.invalid = true - return false - } - - // if source is invalid, consider it invalid - if !itr.source.Valid() { - itr.invalid = true - return false - } - - // if key is at the end or past it, consider it invalid - start := itr.start - end := itr.end - key := readOnlySlice(itr.source.Key()) - - if itr.reverse { - if start != nil && bytes.Compare(key, start) < 0 { - itr.invalid = true - return false - } - } else { - if end != nil && bytes.Compare(end, key) <= 0 { - itr.invalid = true - return false - } - } - - return true -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Key())[len(itr.prefix):] -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - return copyAndFreeSlice(itr.source.Value()) -} - -func (itr iterator) Next() { - if itr.invalid { - return - } - - if itr.reverse { - itr.source.Prev() - } else { - itr.source.Next() - } -} - -func (itr *iterator) Error() error { - return itr.source.Err() -} - -func (itr *iterator) Close() error { - itr.source.Close() - itr.source = nil - itr.invalid = true - - return nil -} - -func (itr *iterator) assertIsValid() { - if itr.invalid { - panic("iterator is invalid") - } -} diff --git a/store/v2/storage/rocksdb/opts.go b/store/v2/storage/rocksdb/opts.go deleted file mode 100644 index bf2272c17c..0000000000 --- a/store/v2/storage/rocksdb/opts.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package rocksdb - -import ( - "encoding/binary" - "runtime" - - "github.com/linxGnu/grocksdb" -) - -const ( - // CFNameStateStorage defines the RocksDB column family name for versioned state - // storage. - CFNameStateStorage = "state_storage" - - // CFNameDefault defines the RocksDB column family name for the default column. - CFNameDefault = "default" -) - -// NewRocksDBOpts returns the options used for the RocksDB column family for use -// in state storage. -// -// FIXME: We do not enable dict compression for SSTFileWriter, because otherwise -// the file writer won't report correct file size. 
-// Ref: https://github.com/facebook/rocksdb/issues/11146
-func NewRocksDBOpts(sstFileWriter bool) *grocksdb.Options {
-	opts := grocksdb.NewDefaultOptions()
-	opts.SetCreateIfMissing(true)
-	opts.SetComparator(CreateTSComparator())
-	opts.IncreaseParallelism(runtime.NumCPU())
-	opts.OptimizeLevelStyleCompaction(512 * 1024 * 1024)
-	opts.SetTargetFileSizeMultiplier(2)
-	opts.SetLevelCompactionDynamicLevelBytes(true)
-
-	// block based table options
-	bbto := grocksdb.NewDefaultBlockBasedTableOptions()
-
-	// 32KB blocks with a 1G block cache
-	bbto.SetBlockSize(32 * 1024)
-	bbto.SetBlockCache(grocksdb.NewLRUCache(1 << 30))
-
-	bbto.SetFilterPolicy(grocksdb.NewRibbonHybridFilterPolicy(9.9, 1))
-	bbto.SetIndexType(grocksdb.KBinarySearchWithFirstKey)
-	bbto.SetOptimizeFiltersForMemory(true)
-	opts.SetBlockBasedTableFactory(bbto)
-
-	// Improve sst file creation speed: compaction or sst file writer.
-	opts.SetCompressionOptionsParallelThreads(4)
-
-	if !sstFileWriter {
-		// compression options at bottommost level
-		opts.SetBottommostCompression(grocksdb.ZSTDCompression)
-
-		compressOpts := grocksdb.NewDefaultCompressionOptions()
-		compressOpts.MaxDictBytes = 112640 // 110k
-		compressOpts.Level = 12
-
-		opts.SetBottommostCompressionOptions(compressOpts, true)
-		opts.SetBottommostCompressionOptionsZstdMaxTrainBytes(compressOpts.MaxDictBytes*100, true)
-	}
-
-	return opts
-}
-
-// OpenRocksDB opens a RocksDB database connection for versioned reading and writing.
-// It also returns a column family handle for versioning using user-defined timestamps.
-// The default column family is used for metadata, while the versioned key/value
-// pairs are stored in a separate column family named "state_storage", which has
-// user-defined timestamps enabled.
-func OpenRocksDB(dataDir string) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) {
-	opts := grocksdb.NewDefaultOptions()
-	opts.SetCreateIfMissing(true)
-	opts.SetCreateIfMissingColumnFamilies(true)
-
-	db, cfHandles, err := grocksdb.OpenDbColumnFamilies(
-		opts,
-		dataDir,
-		[]string{
-			CFNameDefault,
-			CFNameStateStorage,
-		},
-		[]*grocksdb.Options{
-			opts,
-			NewRocksDBOpts(false),
-		},
-	)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return db, cfHandles[1], nil
-}
-
-// OpenRocksDBAndTrimHistory opens a RocksDB handle similar to `OpenRocksDB`,
-// but it also trims the versions newer than the target one, so the returned
-// handle can be used for rollback.
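-//
-// A minimal rollback sketch (illustrative only):
-//
-//	db, cfHandle, err := OpenRocksDBAndTrimHistory(dataDir, 100) // drop versions > 100
-//	if err != nil {
-//		// handle error
-//	}
-//	ss, err := NewWithDB(db, cfHandle)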
-func OpenRocksDBAndTrimHistory(dataDir string, version int64) (*grocksdb.DB, *grocksdb.ColumnFamilyHandle, error) { - var ts [TimestampSize]byte - binary.LittleEndian.PutUint64(ts[:], uint64(version)) - - opts := grocksdb.NewDefaultOptions() - opts.SetCreateIfMissing(true) - opts.SetCreateIfMissingColumnFamilies(true) - - db, cfHandles, err := grocksdb.OpenDbAndTrimHistory( - opts, - dataDir, - []string{ - CFNameDefault, - CFNameStateStorage, - }, - []*grocksdb.Options{ - opts, - NewRocksDBOpts(false), - }, - ts[:], - ) - if err != nil { - return nil, nil, err - } - - return db, cfHandles[1], nil -} diff --git a/store/v2/storage/sqlite/batch.go b/store/v2/storage/sqlite/batch.go deleted file mode 100644 index 783b597e04..0000000000 --- a/store/v2/storage/sqlite/batch.go +++ /dev/null @@ -1,104 +0,0 @@ -package sqlite - -import ( - "database/sql" - "fmt" - - "cosmossdk.io/store/v2" -) - -var _ store.Batch = (*Batch)(nil) - -type batchAction int - -const ( - batchActionSet batchAction = 0 - batchActionDel batchAction = 1 -) - -type batchOp struct { - action batchAction - storeKey []byte - key, value []byte -} - -type Batch struct { - db *sql.DB - tx *sql.Tx - ops []batchOp - size int - version uint64 -} - -func NewBatch(db *sql.DB, version uint64) (*Batch, error) { - tx, err := db.Begin() - if err != nil { - return nil, fmt.Errorf("failed to create SQL transaction: %w", err) - } - - return &Batch{ - db: db, - tx: tx, - ops: make([]batchOp, 0), - version: version, - }, nil -} - -func (b *Batch) Size() int { - return b.size -} - -func (b *Batch) Reset() error { - b.ops = nil - b.ops = make([]batchOp, 0) - b.size = 0 - - tx, err := b.db.Begin() - if err != nil { - return err - } - - b.tx = tx - return nil -} - -func (b *Batch) Set(storeKey, key, value []byte) error { - b.size += len(key) + len(value) - b.ops = append(b.ops, batchOp{action: batchActionSet, storeKey: storeKey, key: key, value: value}) - return nil -} - -func (b *Batch) Delete(storeKey, key []byte) error { - b.size += len(key) - b.ops = append(b.ops, batchOp{action: batchActionDel, storeKey: storeKey, key: key}) - return nil -} - -func (b *Batch) Write() error { - _, err := b.tx.Exec(reservedUpsertStmt, reservedStoreKey, keyLatestHeight, b.version, 0, b.version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - for _, op := range b.ops { - switch op.action { - case batchActionSet: - _, err := b.tx.Exec(upsertStmt, op.storeKey, op.key, op.value, b.version, op.value) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - case batchActionDel: - _, err := b.tx.Exec(delStmt, b.version, op.storeKey, op.key, b.version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - } - } - - if err := b.tx.Commit(); err != nil { - return fmt.Errorf("failed to write SQL transaction: %w", err) - } - - return nil -} diff --git a/store/v2/storage/sqlite/db.go b/store/v2/storage/sqlite/db.go deleted file mode 100644 index 1ee52583ae..0000000000 --- a/store/v2/storage/sqlite/db.go +++ /dev/null @@ -1,296 +0,0 @@ -package sqlite - -import ( - "bytes" - "database/sql" - "errors" - "fmt" - "path/filepath" - "strings" - - _ "github.com/mattn/go-sqlite3" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - storeerrors "cosmossdk.io/store/v2/errors" - "cosmossdk.io/store/v2/storage" -) - -const ( - driverName = "sqlite3" - dbName = "ss.db?cache=shared&mode=rwc&_journal_mode=WAL" - reservedStoreKey = "_RESERVED_" - keyLatestHeight = 
"latest_height" - keyPruneHeight = "prune_height" - - reservedUpsertStmt = ` - INSERT INTO state_storage(store_key, key, value, version) - VALUES(?, ?, ?, ?) - ON CONFLICT(store_key, key, version) DO UPDATE SET - value = ?; - ` - upsertStmt = ` - INSERT INTO state_storage(store_key, key, value, version) - VALUES(?, ?, ?, ?) - ON CONFLICT(store_key, key, version) DO UPDATE SET - value = ?; - ` - delStmt = ` - UPDATE state_storage SET tombstone = ? - WHERE id = ( - SELECT id FROM state_storage WHERE store_key = ? AND key = ? AND version <= ? ORDER BY version DESC LIMIT 1 - ) AND tombstone = 0; - ` -) - -var _ storage.Database = (*Database)(nil) - -type Database struct { - storage *sql.DB - - // earliestVersion defines the earliest version set in the database, which is - // only updated when the database is pruned. - earliestVersion uint64 -} - -func New(dataDir string) (*Database, error) { - storage, err := sql.Open(driverName, filepath.Join(dataDir, dbName)) - if err != nil { - return nil, fmt.Errorf("failed to open sqlite DB: %w", err) - } - - stmt := ` - CREATE TABLE IF NOT EXISTS state_storage ( - id integer not null primary key, - store_key varchar not null, - key varchar not null, - value varchar not null, - version integer unsigned not null, - tombstone integer unsigned default 0, - unique (store_key, key, version) - ); - - CREATE UNIQUE INDEX IF NOT EXISTS idx_store_key_version ON state_storage (store_key, key, version); - ` - _, err = storage.Exec(stmt) - if err != nil { - return nil, fmt.Errorf("failed to exec SQL statement: %w", err) - } - - pruneHeight, err := getPruneHeight(storage) - if err != nil { - return nil, fmt.Errorf("failed to get prune height: %w", err) - } - - return &Database{ - storage: storage, - earliestVersion: pruneHeight, - }, nil -} - -func (db *Database) Close() error { - err := db.storage.Close() - db.storage = nil - return err -} - -func (db *Database) NewBatch(version uint64) (store.Batch, error) { - return NewBatch(db.storage, version) -} - -func (db *Database) GetLatestVersion() (uint64, error) { - stmt, err := db.storage.Prepare("SELECT value FROM state_storage WHERE store_key = ? AND key = ?") - if err != nil { - return 0, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - defer stmt.Close() - - var latestHeight uint64 - if err := stmt.QueryRow(reservedStoreKey, keyLatestHeight).Scan(&latestHeight); err != nil { - if errors.Is(err, sql.ErrNoRows) { - // in case of a fresh database - return 0, nil - } - - return 0, fmt.Errorf("failed to query row: %w", err) - } - - return latestHeight, nil -} - -func (db *Database) SetLatestVersion(version uint64) error { - _, err := db.storage.Exec(reservedUpsertStmt, reservedStoreKey, keyLatestHeight, version, 0, version) - if err != nil { - return fmt.Errorf("failed to exec SQL statement: %w", err) - } - - return nil -} - -func (db *Database) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - val, err := db.Get(storeKey, version, key) - if err != nil { - return false, err - } - - return val != nil, nil -} - -func (db *Database) Get(storeKey []byte, targetVersion uint64, key []byte) ([]byte, error) { - if targetVersion < db.earliestVersion { - return nil, storeerrors.ErrVersionPruned{EarliestVersion: db.earliestVersion, RequestedVersion: targetVersion} - } - - stmt, err := db.storage.Prepare(` - SELECT value, tombstone FROM state_storage - WHERE store_key = ? AND key = ? AND version <= ? 
-	ORDER BY version DESC LIMIT 1;
-	`)
-	if err != nil {
-		return nil, fmt.Errorf("failed to prepare SQL statement: %w", err)
-	}
-
-	defer stmt.Close()
-
-	var (
-		value []byte
-		tomb  uint64
-	)
-	if err := stmt.QueryRow(storeKey, key, targetVersion).Scan(&value, &tomb); err != nil {
-		if errors.Is(err, sql.ErrNoRows) {
-			return nil, nil
-		}
-
-		return nil, fmt.Errorf("failed to query row: %w", err)
-	}
-
-	// A tombstone of zero or a target version that is less than the tombstone
-	// version means the key is not deleted at the target version.
-	if tomb == 0 || targetVersion < tomb {
-		return value, nil
-	}
-
-	// the value is considered deleted
-	return nil, nil
-}
-
-// Prune removes all versions of all keys that are <= the given version. It keeps
-// the latest (non-tombstoned) version of each key/value tuple to handle queries
-// above the prune version. This is analogous to RocksDB full_history_ts_low.
-//
-// We perform the prune by deleting all versions of a key, excluding reserved keys,
-// that are <= the given version, except for the latest version of the key.
-func (db *Database) Prune(version uint64) error {
-	tx, err := db.storage.Begin()
-	if err != nil {
-		return fmt.Errorf("failed to create SQL transaction: %w", err)
-	}
-
-	pruneStmt := `DELETE FROM state_storage
-	WHERE version < (
-		SELECT max(version) FROM state_storage t2 WHERE
-		t2.store_key = state_storage.store_key AND
-		t2.key = state_storage.key AND
-		t2.version <= ?
-	) AND store_key != ?;
-	`
-
-	_, err = tx.Exec(pruneStmt, version, reservedStoreKey)
-	if err != nil {
-		return fmt.Errorf("failed to exec SQL statement: %w", err)
-	}
-
-	// Set the prune height so that queries at or below this height can be
-	// rejected as pruned.
-	_, err = tx.Exec(reservedUpsertStmt, reservedStoreKey, keyPruneHeight, version, 0, version)
-	if err != nil {
-		return fmt.Errorf("failed to exec SQL statement: %w", err)
-	}
-
-	if err := tx.Commit(); err != nil {
-		return fmt.Errorf("failed to write SQL transaction: %w", err)
-	}
-
-	db.earliestVersion = version + 1
-
-	return nil
-}
-
-func (db *Database) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
-	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
-		return nil, storeerrors.ErrKeyEmpty
-	}
-
-	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
-		return nil, storeerrors.ErrStartAfterEnd
-	}
-
-	return newIterator(db, storeKey, version, start, end, false)
-}
-
-func (db *Database) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) {
-	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
-		return nil, storeerrors.ErrKeyEmpty
-	}
-
-	if start != nil && end != nil && bytes.Compare(start, end) > 0 {
-		return nil, storeerrors.ErrStartAfterEnd
-	}
-
-	return newIterator(db, storeKey, version, start, end, true)
-}
-
-func (db *Database) PrintRowsDebug() {
-	stmt, err := db.storage.Prepare("SELECT store_key, key, value, version, tombstone FROM state_storage")
-	if err != nil {
-		panic(fmt.Errorf("failed to prepare SQL statement: %w", err))
-	}
-
-	defer stmt.Close()
-
-	rows, err := stmt.Query()
-	if err != nil {
-		panic(fmt.Errorf("failed to execute SQL query: %w", err))
-	}
-
-	var sb strings.Builder
-	for rows.Next() {
-		var (
-			storeKey []byte
-			key      []byte
-			value    []byte
-			version  uint64
-			tomb     uint64
-		)
-		if err := rows.Scan(&storeKey, &key, &value, &version, &tomb); err != nil {
-			panic(fmt.Sprintf("failed to scan row: %s", err))
-		}
-
-		
sb.WriteString(fmt.Sprintf("STORE_KEY: %s, KEY: %s, VALUE: %s, VERSION: %d, TOMBSTONE: %d\n", storeKey, key, value, version, tomb)) - } - if err := rows.Err(); err != nil { - panic(fmt.Errorf("received unexpected error: %w", err)) - } - - fmt.Println(strings.TrimSpace(sb.String())) -} - -func getPruneHeight(storage *sql.DB) (uint64, error) { - stmt, err := storage.Prepare(`SELECT value FROM state_storage WHERE store_key = ? AND key = ?`) - if err != nil { - return 0, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - defer stmt.Close() - - var value uint64 - if err := stmt.QueryRow(reservedStoreKey, keyPruneHeight).Scan(&value); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - - return 0, fmt.Errorf("failed to query row: %w", err) - } - - return value, nil -} diff --git a/store/v2/storage/sqlite/db_test.go b/store/v2/storage/sqlite/db_test.go deleted file mode 100644 index fc4c27e6bf..0000000000 --- a/store/v2/storage/sqlite/db_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package sqlite - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2/storage" -) - -var storeKey1 = []byte("store1") - -func TestStorageTestSuite(t *testing.T) { - s := &storage.StorageTestSuite{ - NewDB: func(dir string) (*storage.StorageStore, error) { - db, err := New(dir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - EmptyBatchSize: 0, - } - suite.Run(t, s) -} - -func TestDatabase_ReverseIterator(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - batch, err := db.NewBatch(1) - require.NoError(t, err) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - - // reverse iterator without an end key - iter, err := db.ReverseIterator(storeKey1, 1, []byte("key000"), nil) - require.NoError(t, err) - - defer iter.Close() - - i, count := 99, 0 - for ; iter.Valid(); iter.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter.Value()) - - i-- - count++ - } - require.Equal(t, 100, count) - require.NoError(t, iter.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter.Valid()) - - // reverse iterator with a start and end domain - iter2, err := db.ReverseIterator(storeKey1, 1, []byte("key010"), []byte("key019")) - require.NoError(t, err) - - defer iter2.Close() - - i, count = 18, 0 - for ; iter2.Valid(); iter2.Next() { - require.Equal(t, []byte(fmt.Sprintf("key%03d", i)), iter2.Key()) - require.Equal(t, []byte(fmt.Sprintf("val%03d", i)), iter2.Value()) - - i-- - count++ - } - require.Equal(t, 9, count) - require.NoError(t, iter2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - require.False(t, iter2.Valid()) - - // start must be <= end - iter3, err := db.ReverseIterator(storeKey1, 1, []byte("key020"), []byte("key019")) - require.Error(t, err) - require.Nil(t, iter3) -} - -func TestParallelWrites(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - latestVersion := 10 - kvCount := 100 - - wg := sync.WaitGroup{} - triggerStartCh := make(chan bool) - - // 
start 10 goroutines that write to the database - for i := 0; i < latestVersion; i++ { - wg.Add(1) - go func(i int) { - <-triggerStartCh - defer wg.Done() - batch, err := db.NewBatch(uint64(i + 1)) - require.NoError(t, err) - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - }(i) - - } - - // start the goroutines - close(triggerStartCh) - wg.Wait() - - // check that all the data is there - for i := 0; i < latestVersion; i++ { - for j := 0; j < kvCount; j++ { - version := uint64(i + 1) - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - v, err := db.Get(storeKey1, version, []byte(key)) - require.NoError(t, err) - require.Equal(t, []byte(val), v) - } - } -} - -func TestParallelWriteAndPruning(t *testing.T) { - db, err := New(t.TempDir()) - require.NoError(t, err) - defer db.Close() - - latestVersion := 100 - kvCount := 100 - prunePeriod := 5 - - wg := sync.WaitGroup{} - triggerStartCh := make(chan bool) - - // start a goroutine that write to the database - wg.Add(1) - go func() { - <-triggerStartCh - defer wg.Done() - for i := 0; i < latestVersion; i++ { - batch, err := db.NewBatch(uint64(i + 1)) - require.NoError(t, err) - for j := 0; j < kvCount; j++ { - key := fmt.Sprintf("key-%d-%03d", i, j) - val := fmt.Sprintf("val-%d-%03d", i, j) - - require.NoError(t, batch.Set(storeKey1, []byte(key), []byte(val))) - } - - require.NoError(t, batch.Write()) - } - }() - // start a goroutine that prunes the database - wg.Add(1) - go func() { - <-triggerStartCh - defer wg.Done() - for i := 10; i < latestVersion; i += prunePeriod { - for { - v, err := db.GetLatestVersion() - require.NoError(t, err) - if v > uint64(i) { - require.NoError(t, db.Prune(v-1)) - break - } - } - } - }() - - // start the goroutines - close(triggerStartCh) - wg.Wait() - - // check if the data is pruned - version := uint64(latestVersion - prunePeriod) - val, err := db.Get(storeKey1, version, []byte(fmt.Sprintf("key-%d-%03d", version-1, 0))) - require.Error(t, err) - require.Nil(t, val) - - version = uint64(latestVersion) - val, err = db.Get(storeKey1, version, []byte(fmt.Sprintf("key-%d-%03d", version-1, 0))) - require.NoError(t, err) - require.Equal(t, []byte(fmt.Sprintf("val-%d-%03d", version-1, 0)), val) -} diff --git a/store/v2/storage/sqlite/iterator.go b/store/v2/storage/sqlite/iterator.go deleted file mode 100644 index daf7e073db..0000000000 --- a/store/v2/storage/sqlite/iterator.go +++ /dev/null @@ -1,183 +0,0 @@ -package sqlite - -import ( - "bytes" - "database/sql" - "fmt" - "slices" - "strings" - - corestore "cosmossdk.io/core/store" -) - -var _ corestore.Iterator = (*iterator)(nil) - -type iterator struct { - statement *sql.Stmt - rows *sql.Rows - key, val []byte - start, end []byte - valid bool - err error -} - -func newIterator(db *Database, storeKey []byte, targetVersion uint64, start, end []byte, reverse bool) (*iterator, error) { - if targetVersion < db.earliestVersion { - return &iterator{ - start: start, - end: end, - valid: false, - }, nil - } - - var ( - keyClause = []string{"store_key = ?", "version <= ?"} - queryArgs []any - ) - - switch { - case len(start) > 0 && len(end) > 0: - keyClause = append(keyClause, "key >= ?", "key < ?") - queryArgs = []any{storeKey, targetVersion, start, end, targetVersion} - - case len(start) > 0 && len(end) == 0: - keyClause = append(keyClause, "key >= ?") - queryArgs 
= []any{storeKey, targetVersion, start, targetVersion} - - case len(start) == 0 && len(end) > 0: - keyClause = append(keyClause, "key < ?") - queryArgs = []any{storeKey, targetVersion, end, targetVersion} - - default: - queryArgs = []any{storeKey, targetVersion, targetVersion} - } - - orderBy := "ASC" - if reverse { - orderBy = "DESC" - } - - // Note, this is not susceptible to SQL injection because placeholders are used - // for parts of the query outside the store's direct control. - stmt, err := db.storage.Prepare(fmt.Sprintf(` - SELECT x.key, x.value - FROM ( - SELECT key, value, version, tombstone, - row_number() OVER (PARTITION BY key ORDER BY version DESC) AS _rn - FROM state_storage WHERE %s - ) x - WHERE x._rn = 1 AND (x.tombstone = 0 OR x.tombstone > ?) ORDER BY x.key %s; - `, strings.Join(keyClause, " AND "), orderBy)) - if err != nil { - return nil, fmt.Errorf("failed to prepare SQL statement: %w", err) - } - - rows, err := stmt.Query(queryArgs...) - if err != nil { - _ = stmt.Close() - return nil, fmt.Errorf("failed to execute SQL query: %w", err) - } - - itr := &iterator{ - statement: stmt, - rows: rows, - start: start, - end: end, - valid: rows.Next(), - } - if !itr.valid { - itr.err = fmt.Errorf("iterator invalid: %w", sql.ErrNoRows) - return itr, nil - } - - // read the first row - itr.parseRow() - if !itr.valid { - return itr, nil - } - - return itr, nil -} - -func (itr *iterator) Close() (err error) { - if itr.statement != nil { - err = itr.statement.Close() - } - - itr.valid = false - itr.statement = nil - itr.rows = nil - - return err -} - -// Domain returns the domain of the iterator. The caller must not modify the -// return values. -func (itr *iterator) Domain() ([]byte, []byte) { - return itr.start, itr.end -} - -func (itr *iterator) Key() []byte { - itr.assertIsValid() - return slices.Clone(itr.key) -} - -func (itr *iterator) Value() []byte { - itr.assertIsValid() - return slices.Clone(itr.val) -} - -func (itr *iterator) Valid() bool { - if !itr.valid || itr.rows.Err() != nil { - itr.valid = false - return itr.valid - } - - // if key is at the end or past it, consider it invalid - if end := itr.end; end != nil { - if bytes.Compare(end, itr.Key()) <= 0 { - itr.valid = false - return itr.valid - } - } - - return true -} - -func (itr *iterator) Next() { - if itr.rows.Next() { - itr.parseRow() - return - } - - itr.valid = false -} - -func (itr *iterator) Error() error { - if err := itr.rows.Err(); err != nil { - return err - } - - return itr.err -} - -func (itr *iterator) parseRow() { - var ( - key []byte - value []byte - ) - if err := itr.rows.Scan(&key, &value); err != nil { - itr.err = fmt.Errorf("failed to scan row: %w", err) - itr.valid = false - return - } - - itr.key = key - itr.val = value -} - -func (itr *iterator) assertIsValid() { - if !itr.valid { - panic("iterator is invalid") - } -} diff --git a/store/v2/storage/storage_bench_test.go b/store/v2/storage/storage_bench_test.go deleted file mode 100644 index 960c144782..0000000000 --- a/store/v2/storage/storage_bench_test.go +++ /dev/null @@ -1,186 +0,0 @@ -//go:build rocksdb -// +build rocksdb - -package storage_test - -import ( - "bytes" - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - corestore "cosmossdk.io/core/store" - coretesting "cosmossdk.io/core/testing" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/storage" - "cosmossdk.io/store/v2/storage/pebbledb" - "cosmossdk.io/store/v2/storage/rocksdb" - "cosmossdk.io/store/v2/storage/sqlite" -) - -var storeKey1 = 
[]byte("store1") - -var ( - backends = map[string]func(dataDir string) (store.VersionedDatabase, error){ - "rocksdb_versiondb_opts": func(dataDir string) (store.VersionedDatabase, error) { - db, err := rocksdb.New(dataDir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - "pebbledb_default_opts": func(dataDir string) (store.VersionedDatabase, error) { - db, err := pebbledb.New(dataDir) - if err == nil && db != nil { - db.SetSync(false) - } - - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - "btree_sqlite": func(dataDir string) (store.VersionedDatabase, error) { - db, err := sqlite.New(dataDir) - return storage.NewStorageStore(db, coretesting.NewNopLogger()), err - }, - } - rng = rand.New(rand.NewSource(567320)) -) - -func BenchmarkGet(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i := 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - vals[i] = val - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(1, cs)) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - key := keys[rng.Intn(len(keys))] - - b.StartTimer() - _, err = db.Get(storeKey1, 1, key) - require.NoError(b, err) - } - }) - } -} - -func BenchmarkApplyChangeset(b *testing.B) { - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}}) - for j := 0; j < 1000; j++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err = rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - cs.AddKVPair(storeKey1, corestore.KVPair{Key: key, Value: val}) - } - - b.StartTimer() - require.NoError(b, db.ApplyChangeset(uint64(b.N+1), cs)) - } - }) - } -} - -func BenchmarkIterate(b *testing.B) { - numKeyVals := 1_000_000 - keys := make([][]byte, numKeyVals) - vals := make([][]byte, numKeyVals) - for i := 0; i < numKeyVals; i++ { - key := make([]byte, 128) - val := make([]byte, 128) - - _, err := rng.Read(key) - require.NoError(b, err) - _, err = rng.Read(val) - require.NoError(b, err) - - keys[i] = key - vals[i] = val - - } - - for ty, fn := range backends { - db, err := fn(b.TempDir()) - require.NoError(b, err) - defer func() { - _ = db.Close() - }() - - b.StopTimer() - - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{string(storeKey1): {}}) - for i := 0; i < numKeyVals; i++ { - cs.AddKVPair(storeKey1, corestore.KVPair{Key: keys[i], Value: vals[i]}) - } - - require.NoError(b, db.ApplyChangeset(1, cs)) - - sort.Slice(keys, func(i, j int) bool { - return bytes.Compare(keys[i], keys[j]) < 0 - }) - - b.Run(fmt.Sprintf("backend_%s", ty), func(b *testing.B) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - - 
itr, err := db.Iterator(storeKey1, 1, keys[0], nil) - require.NoError(b, err) - - b.StartTimer() - - for ; itr.Valid(); itr.Next() { - _ = itr.Key() - _ = itr.Value() - } - - require.NoError(b, itr.Error()) - } - }) - } -} diff --git a/store/v2/storage/storage_test_suite.go b/store/v2/storage/storage_test_suite.go deleted file mode 100644 index 475c529b62..0000000000 --- a/store/v2/storage/storage_test_suite.go +++ /dev/null @@ -1,666 +0,0 @@ -package storage - -import ( - "fmt" - "slices" - "testing" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" -) - -const ( - storeKey1 = "store1" -) - -var storeKey1Bytes = []byte(storeKey1) - -// StorageTestSuite defines a reusable test suite for all storage backends. -type StorageTestSuite struct { - suite.Suite - - NewDB func(dir string) (*StorageStore, error) - EmptyBatchSize int - SkipTests []string -} - -func (s *StorageTestSuite) TestDatabase_Close() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - s.Require().NoError(db.Close()) - - // close should not be idempotent - s.Require().Panics(func() { _ = db.Close() }) -} - -func (s *StorageTestSuite) TestDatabase_LatestVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Zero(lv) - - for i := uint64(1); i <= 1001; i++ { - err = db.SetLatestVersion(i) - s.Require().NoError(err) - - lv, err = db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(i, lv) - } -} - -func (s *StorageTestSuite) TestDatabase_VersionedKeys() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - for i := uint64(1); i <= 100; i++ { - s.Require().NoError(db.ApplyChangeset(i, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte(fmt.Sprintf("value%03d", i))}}, - }, - ))) - } - - for i := uint64(1); i <= 100; i++ { - bz, err := db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal(fmt.Sprintf("value%03d", i), string(bz)) - } -} - -func (s *StorageTestSuite) TestDatabase_GetVersionedKey() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // store a key at version 1 - s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value001")}}, - }, - ))) - - // assume chain progresses to version 10 w/o any changes to key - bz, err := db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err := db.Has(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - // chain progresses to version 11 with an update to key - s.Require().NoError(db.ApplyChangeset(11, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key"), Value: []byte("value011")}}, - }, - ))) - - bz, err = db.Get(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value001"), bz) - - ok, err = db.Has(storeKey1Bytes, 10, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - - for i := uint64(11); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Equal([]byte("value011"), bz) - - ok, err = db.Has(storeKey1Bytes, i, 
[]byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // chain progresses to version 15 with a delete to key - s.Require().NoError(db.ApplyChangeset(15, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{storeKey1: {{Key: []byte("key"), Remove: true}}}, - ))) - - // all queries up to version 14 should return the latest value - for i := uint64(1); i <= 14; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().NotNil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().True(ok) - } - - // all queries after version 15 should return nil - for i := uint64(15); i <= 17; i++ { - bz, err = db.Get(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().Nil(bz) - - ok, err = db.Has(storeKey1Bytes, i, []byte("key")) - s.Require().NoError(err) - s.Require().False(ok) - } -} - -func (s *StorageTestSuite) TestDatabase_ApplyChangeset() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Value: []byte("value")}) - } - - for i := 0; i < 100; i++ { - if i%10 == 0 { - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(fmt.Sprintf("key%03d", i)), Remove: true}) - } - } - - s.Require().NoError(db.ApplyChangeset(1, cs)) - - lv, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(1), lv) - - for i := 0; i < 1; i++ { - ok, err := db.Has(storeKey1Bytes, 1, []byte(fmt.Sprintf("key%03d", i))) - s.Require().NoError(err) - - if i%10 == 0 { - s.Require().False(ok) - } else { - s.Require().True(ok) - } - } -} - -func (s *StorageTestSuite) TestDatabase_IteratorEmptyDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err := db.Iterator(storeKey1Bytes, 1, []byte{}, []byte{}) - s.Require().Error(err) - s.Require().Nil(iter) -} - -func (s *StorageTestSuite) TestDatabase_IteratorClose() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - iter, err := db.Iterator(storeKey1Bytes, 1, []byte("key000"), nil) - s.Require().NoError(err) - iter.Close() - - s.Require().False(iter.Valid()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - testCases := map[string]struct { - start, end []byte - }{ - "start without end domain": { - start: []byte("key010"), - }, - "start and end domain": { - start: []byte("key010"), - end: []byte("key020"), - }, - } - - for name, tc := range testCases { - s.Run(name, func() { - iter, err := db.Iterator(storeKey1Bytes, 1, tc.start, tc.end) - s.Require().NoError(err) - - defer iter.Close() - - start, end := iter.Domain() - s.Require().Equal(tc.start, start) - s.Require().Equal(tc.end, end) - }) - } -} - -func (s *StorageTestSuite) TestDatabase_Iterator() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 100; i++ { - key := fmt.Sprintf("key%03d", i) // key000, key001, ..., key099 - val := fmt.Sprintf("val%03d", i) // val000, val001, ..., val099 - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - 
s.Require().NoError(db.ApplyChangeset(1, cs)) - - // iterator without an end key over multiple versions - for v := uint64(1); v < 5; v++ { - itr, err := db.Iterator(storeKey1Bytes, v, []byte("key000"), nil) - s.Require().NoError(err) - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(100, count) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr.Valid()) - - err = itr.Close() - s.Require().NoError(err, "Failed to close iterator") - } - - // iterator with a start and end domain over multiple versions - for v := uint64(1); v < 5; v++ { - itr2, err := db.Iterator(storeKey1Bytes, v, []byte("key010"), []byte("key019")) - s.Require().NoError(err) - - i, count := 10, 0 - for ; itr2.Valid(); itr2.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr2.Key()) - s.Require().Equal([]byte(fmt.Sprintf("val%03d", i)), itr2.Value()) - - i++ - count++ - } - s.Require().Equal(9, count) - s.Require().NoError(itr2.Error()) - - // seek past domain, which should make the iterator invalid and produce an error - s.Require().False(itr2.Valid()) - - err = itr2.Close() - if err != nil { - return - } - } - - // start must be <= end - iter3, err := db.Iterator(storeKey1Bytes, 1, []byte("key020"), []byte("key019")) - s.Require().Error(err) - s.Require().Nil(iter3) -} - -func (s *StorageTestSuite) TestDatabase_Iterator_RangedDeletes() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: { - {Key: []byte("key001"), Value: []byte("value001"), Remove: false}, - {Key: []byte("key002"), Value: []byte("value001"), Remove: false}, - }, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(5, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Value: []byte("value002"), Remove: false}}, - }, - ))) - - s.Require().NoError(db.ApplyChangeset(10, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{ - storeKey1: {{Key: []byte("key002"), Remove: true}}, - }, - ))) - - itr, err := db.Iterator(storeKey1Bytes, 11, []byte("key001"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // there should only be one valid key in the iterator -- key001 - var count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte("key001"), itr.Key()) - count++ - } - s.Require().Equal(1, count) - s.Require().NoError(itr.Error()) -} - -func (s *StorageTestSuite) TestDatabase_IteratorMultiVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-49, set all 10 keys - for v := uint64(1); v < 50; v++ { - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(v, cs)) - } - - // for versions 50-100, only update even keys - for v := uint64(50); v <= 100; v++ { - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - if i%2 == 0 { - key := 
fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - } - - s.Require().NoError(db.ApplyChangeset(v, cs)) - } - - itr, err := db.Iterator(storeKey1Bytes, 69, []byte("key000"), nil) - s.Require().NoError(err) - - defer itr.Close() - - // All keys should be present; All odd keys should have a value that reflects - // version 49, and all even keys should have a value that reflects the desired - // version, 69. - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - - if i%2 == 0 { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 69)), itr.Value()) - } else { - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 49)), itr.Value()) - } - - i = (i + 1) % 10 - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_SkipVersion() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - - defer db.Close() - - DBApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - DBApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value000")}) - DBApplyChangeset(s.T(), db, 58827506, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value000")}) - DBApplyChangeset(s.T(), db, 58833605, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value004")}) - DBApplyChangeset(s.T(), db, 58833606, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value006")}) - - itr, err := db.Iterator(storeKey1Bytes, 58831525, []byte("key"), nil) - s.Require().NoError(err) - defer itr.Close() - - count := make(map[string]struct{}) - for ; itr.Valid(); itr.Next() { - count[string(itr.Key())] = struct{}{} - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(3, len(count)) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIteration() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - DBApplyChangeset(s.T(), db, 8, storeKey1, [][]byte{[]byte("keyA")}, [][]byte{[]byte("value001")}) - DBApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - DBApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - DBApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - DBApplyChangeset(s.T(), db, 2, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - DBApplyChangeset(s.T(), db, 3, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - DBApplyChangeset(s.T(), db, 4, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - DBApplyChangeset(s.T(), db, 5, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - s.Require().NoError(err) - - defer itr.Close() - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().NoError(itr.Error()) - s.Require().Equal(4, count) -} - -func (s *StorageTestSuite) TestDatabaseIterator_ForwardIterationHigher() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - DBApplyChangeset(s.T(), db, 9, storeKey1, [][]byte{[]byte("keyB")}, [][]byte{[]byte("value002")}) - DBApplyChangeset(s.T(), db, 10, storeKey1, [][]byte{[]byte("keyC")}, [][]byte{[]byte("value003")}) - 
DBApplyChangeset(s.T(), db, 11, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value004")}) - - DBApplyChangeset(s.T(), db, 12, storeKey1, [][]byte{[]byte("keyD")}, [][]byte{[]byte("value007")}) - DBApplyChangeset(s.T(), db, 13, storeKey1, [][]byte{[]byte("keyE")}, [][]byte{[]byte("value008")}) - DBApplyChangeset(s.T(), db, 14, storeKey1, [][]byte{[]byte("keyF")}, [][]byte{[]byte("value009")}) - DBApplyChangeset(s.T(), db, 15, storeKey1, [][]byte{[]byte("keyH")}, [][]byte{[]byte("value010")}) - - itr, err := db.Iterator(storeKey1Bytes, 6, nil, []byte("keyZ")) - s.Require().NoError(err) - - defer itr.Close() - - count := 0 - for ; itr.Valid(); itr.Next() { - count++ - } - - s.Require().Equal(0, count) -} - -func (s *StorageTestSuite) TestDatabase_IteratorNoDomain() { - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set all 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val), Remove: false}) - } - - s.Require().NoError(db.ApplyChangeset(v, cs)) - } - - // create an iterator over the entire domain - itr, err := db.Iterator(storeKey1Bytes, 50, nil, nil) - s.Require().NoError(err) - - defer itr.Close() - - var i, count int - for ; itr.Valid(); itr.Next() { - s.Require().Equal([]byte(fmt.Sprintf("key%03d", i)), itr.Key(), string(itr.Key())) - s.Require().Equal([]byte(fmt.Sprintf("val%03d-%03d", i, 50)), itr.Value()) - - i++ - count++ - } - s.Require().NoError(itr.Error()) - s.Require().Equal(10, count) -} - -func (s *StorageTestSuite) TestDatabase_Prune() { - if slices.Contains(s.SkipTests, s.T().Name()) { - s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - // for versions 1-50, set 10 keys - for v := uint64(1); v <= 50; v++ { - cs := corestore.NewChangesetWithPairs(map[string]corestore.KVPairs{storeKey1: {}}) - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - cs.AddKVPair(storeKey1Bytes, corestore.KVPair{Key: []byte(key), Value: []byte(val)}) - } - - s.Require().NoError(db.ApplyChangeset(v, cs)) - } - - // prune the first 25 versions - s.Require().NoError(db.Prune(25)) - - latestVersion, err := db.GetLatestVersion() - s.Require().NoError(err) - s.Require().Equal(uint64(50), latestVersion) - - // Ensure all keys are no longer present up to and including version 25 and - // all keys are present after version 25. 
- for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - val := fmt.Sprintf("val%03d-%03d", i, v) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - if v <= 25 { - s.Require().Error(err) - s.Require().Nil(bz) - } else { - s.Require().NoError(err) - s.Require().Equal([]byte(val), bz) - } - } - } - - itr, err := db.Iterator(storeKey1Bytes, 25, []byte("key000"), nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - // prune the latest version which should prune the entire dataset - s.Require().NoError(db.Prune(50)) - - for v := uint64(1); v <= 50; v++ { - for i := 0; i < 10; i++ { - key := fmt.Sprintf("key%03d", i) - - bz, err := db.Get(storeKey1Bytes, v, []byte(key)) - s.Require().Error(err) - s.Require().Nil(bz) - } - } -} - -func (s *StorageTestSuite) TestDatabase_Prune_KeepRecent() { - if slices.Contains(s.SkipTests, s.T().Name()) { - s.T().SkipNow() - } - - db, err := s.NewDB(s.T().TempDir()) - s.Require().NoError(err) - defer db.Close() - - key := []byte("key") - - // write a key at three different versions - s.Require().NoError(db.ApplyChangeset(1, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val001"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(100, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val100"), Remove: false}}}, - ))) - s.Require().NoError(db.ApplyChangeset(200, corestore.NewChangesetWithPairs( - map[string]corestore.KVPairs{storeKey1: {{Key: key, Value: []byte("val200"), Remove: false}}}, - ))) - - // prune version 50 - s.Require().NoError(db.Prune(50)) - - // ensure queries for versions 50 and older return nil - bz, err := db.Get(storeKey1Bytes, 49, key) - s.Require().Error(err) - s.Require().Nil(bz) - - itr, err := db.Iterator(storeKey1Bytes, 49, nil, nil) - s.Require().NoError(err) - s.Require().False(itr.Valid()) - - defer itr.Close() - - // ensure the value previously at version 1 is still there for queries greater than 50 - bz, err = db.Get(storeKey1Bytes, 51, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val001"), bz) - - // ensure the correct value at a greater height - bz, err = db.Get(storeKey1Bytes, 200, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) - - // prune latest height and ensure we have the previous version when querying above it - s.Require().NoError(db.Prune(200)) - - bz, err = db.Get(storeKey1Bytes, 201, key) - s.Require().NoError(err) - s.Require().Equal([]byte("val200"), bz) -} - -func DBApplyChangeset( - t *testing.T, - db store.VersionedDatabase, - version uint64, - storeKey string, - keys, vals [][]byte, -) { - t.Helper() - - require.Greater(t, version, uint64(0)) - require.Equal(t, len(keys), len(vals)) - - cs := corestore.NewChangeset() - for i := 0; i < len(keys); i++ { - remove := false - if vals[i] == nil { - remove = true - } - - cs.AddKVPair([]byte(storeKey), corestore.KVPair{Key: keys[i], Value: vals[i], Remove: remove}) - } - - require.NoError(t, db.ApplyChangeset(version, cs)) -} diff --git a/store/v2/storage/store.go b/store/v2/storage/store.go deleted file mode 100644 index 25381ee185..0000000000 --- a/store/v2/storage/store.go +++ /dev/null @@ -1,143 +0,0 @@ -package storage - -import ( - "fmt" - - "cosmossdk.io/core/log" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2" - "cosmossdk.io/store/v2/snapshots" -) - -const ( - // TODO: it is a random number, need to be tuned - 
defaultBatchBufferSize = 100000 -) - -var ( - _ store.VersionedDatabase = (*StorageStore)(nil) - _ snapshots.StorageSnapshotter = (*StorageStore)(nil) - _ store.Pruner = (*StorageStore)(nil) -) - -// StorageStore is a wrapper around the store.VersionedDatabase interface. -type StorageStore struct { - logger log.Logger - db Database -} - -// NewStorageStore returns a reference to a new StorageStore. -func NewStorageStore(db Database, logger log.Logger) *StorageStore { - return &StorageStore{ - logger: logger, - db: db, - } -} - -// Has returns true if the key exists in the store. -func (ss *StorageStore) Has(storeKey []byte, version uint64, key []byte) (bool, error) { - return ss.db.Has(storeKey, version, key) -} - -// Get returns the value associated with the given key. -func (ss *StorageStore) Get(storeKey []byte, version uint64, key []byte) ([]byte, error) { - return ss.db.Get(storeKey, version, key) -} - -// ApplyChangeset applies the given changeset to the storage. -func (ss *StorageStore) ApplyChangeset(version uint64, cs *corestore.Changeset) error { - b, err := ss.db.NewBatch(version) - if err != nil { - return err - } - - for _, pairs := range cs.Changes { - for _, kvPair := range pairs.StateChanges { - if kvPair.Remove { - if err := b.Delete(pairs.Actor, kvPair.Key); err != nil { - return err - } - } else { - if err := b.Set(pairs.Actor, kvPair.Key, kvPair.Value); err != nil { - return err - } - } - } - } - - if err := b.Write(); err != nil { - return err - } - - return nil -} - -// GetLatestVersion returns the latest version of the store. -func (ss *StorageStore) GetLatestVersion() (uint64, error) { - return ss.db.GetLatestVersion() -} - -// SetLatestVersion sets the latest version of the store. -func (ss *StorageStore) SetLatestVersion(version uint64) error { - return ss.db.SetLatestVersion(version) -} - -// Iterator returns an iterator over the specified domain and prefix. -func (ss *StorageStore) Iterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.Iterator(storeKey, version, start, end) -} - -// ReverseIterator returns an iterator over the specified domain and prefix in reverse. -func (ss *StorageStore) ReverseIterator(storeKey []byte, version uint64, start, end []byte) (corestore.Iterator, error) { - return ss.db.ReverseIterator(storeKey, version, start, end) -} - -// Prune prunes the store up to the given version. -func (ss *StorageStore) Prune(version uint64) error { - return ss.db.Prune(version) -} - -// Restore restores the store from the given channel. -func (ss *StorageStore) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error { - latestVersion, err := ss.db.GetLatestVersion() - if err != nil { - return fmt.Errorf("failed to get latest version: %w", err) - } - if version <= latestVersion { - return fmt.Errorf("the snapshot version %d is not greater than latest version %d", version, latestVersion) - } - - b, err := ss.db.NewBatch(version) - if err != nil { - return err - } - - for kvPair := range chStorage { - for _, kv := range kvPair.StateChanges { - if err := b.Set(kvPair.Actor, kv.Key, kv.Value); err != nil { - return err - } - if b.Size() > defaultBatchBufferSize { - if err := b.Write(); err != nil { - return err - } - if err := b.Reset(); err != nil { - return err - } - } - } - } - - if b.Size() > 0 { - if err := b.Write(); err != nil { - return err - } - } - - return nil -} - -// Close closes the store. 
-func (ss *StorageStore) Close() error { - return ss.db.Close() -} diff --git a/store/v2/storage/util/iterator.go b/store/v2/storage/util/iterator.go deleted file mode 100644 index fe207314c7..0000000000 --- a/store/v2/storage/util/iterator.go +++ /dev/null @@ -1,53 +0,0 @@ -package util - -// IterateWithPrefix returns the begin and end keys for an iterator over a domain -// and prefix. -func IterateWithPrefix(prefix, begin, end []byte) ([]byte, []byte) { - if len(prefix) == 0 { - return begin, end - } - - begin = cloneAppend(prefix, begin) - - if end == nil { - end = CopyIncr(prefix) - } else { - end = cloneAppend(prefix, end) - } - - return begin, end -} - -func cloneAppend(front, tail []byte) (res []byte) { - res = make([]byte, len(front)+len(tail)) - - n := copy(res, front) - copy(res[n:], tail) - - return res -} - -func CopyIncr(bz []byte) []byte { - if len(bz) == 0 { - panic("copyIncr expects non-zero bz length") - } - - ret := make([]byte, len(bz)) - copy(ret, bz) - - for i := len(bz) - 1; i >= 0; i-- { - if ret[i] < byte(0xFF) { - ret[i]++ - return ret - } - - ret[i] = byte(0x00) - - if i == 0 { - // overflow - return nil - } - } - - return nil -} diff --git a/store/v2/store.go b/store/v2/store.go deleted file mode 100644 index f6af274220..0000000000 --- a/store/v2/store.go +++ /dev/null @@ -1,109 +0,0 @@ -package store - -import ( - "io" - - coreheader "cosmossdk.io/core/header" - corestore "cosmossdk.io/core/store" - "cosmossdk.io/store/v2/metrics" - "cosmossdk.io/store/v2/proof" -) - -// RootStore defines an abstraction layer containing a State Storage (SS) engine -// and one or more State Commitment (SC) engines. -type RootStore interface { - // StateLatest returns a read-only version of the RootStore at the latest - // height, alongside the associated version. - StateLatest() (uint64, corestore.ReaderMap, error) - - // StateAt is analogous to StateLatest() except it returns a read-only version - // of the RootStore at the provided version. If such a version cannot be found, - // an error must be returned. - StateAt(version uint64) (corestore.ReaderMap, error) - - // GetStateStorage returns the SS backend. - GetStateStorage() VersionedDatabase - - // GetStateCommitment returns the SC backend. - GetStateCommitment() Committer - - // Query performs a query on the RootStore for a given store key, version (height), - // and key tuple. Queries should be routed to the underlying SS engine. - Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error) - - // LoadVersion loads the RootStore to the given version. - LoadVersion(version uint64) error - - // LoadLatestVersion behaves identically to LoadVersion except it loads the - // latest version implicitly. - LoadLatestVersion() error - - // GetLatestVersion returns the latest version, i.e. height, committed. - GetLatestVersion() (uint64, error) - - // SetInitialVersion sets the initial version on the RootStore. - SetInitialVersion(v uint64) error - - // SetCommitHeader sets the commit header for the next commit. This call and - // implementation is optional. However, it must be supported in cases where - // queries based on block time need to be supported. - SetCommitHeader(h *coreheader.Info) - - // WorkingHash returns the current WIP commitment hash by applying the Changeset - // to the SC backend. It is only used to get the hash of the intermediate state - // before committing, the typical use case is for the genesis block. - // NOTE: It also writes the changeset to the SS backend. 
- WorkingHash(cs *corestore.Changeset) ([]byte, error) - - // Commit should be responsible for taking the provided changeset and flushing - // it to disk. Note, it will overwrite the changeset if WorkingHash() was called. - // Commit() should ensure the changeset is committed to all SC and SS backends - // and flushed to disk. It must return a hash of the merkle-ized committed state. - Commit(cs *corestore.Changeset) ([]byte, error) - - // LastCommitID returns a CommitID pertaining to the last commitment. - LastCommitID() (proof.CommitID, error) - - // SetMetrics sets the telemetry handler on the RootStore. - SetMetrics(m metrics.Metrics) - - io.Closer -} - -// UpgradeableRootStore extends the RootStore interface to support loading versions -// with upgrades. -type UpgradeableRootStore interface { - RootStore - - // LoadVersionAndUpgrade behaves identically to LoadVersion except it also - // accepts a StoreUpgrades object that defines a series of transformations to - // apply to store keys (if any). - // - // Note, handling StoreUpgrades is optional depending on the underlying RootStore - // implementation. - LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error -} - -// Pruner defines the interface for pruning old versions of the store or database. -type Pruner interface { - // Prune prunes the store to the provided version. - Prune(version uint64) error -} - -// PausablePruner extends the Pruner interface to include the API for pausing -// the pruning process. -type PausablePruner interface { - Pruner - - // PausePruning pauses or resumes the pruning process to avoid the parallel writes - // while committing the state. - PausePruning(pause bool) -} - -// QueryResult defines the response type to performing a query on a RootStore. -type QueryResult struct { - Key []byte - Value []byte - Version uint64 - ProofOps []proof.CommitmentOp -} diff --git a/store/v2/trace.go b/store/v2/trace.go deleted file mode 100644 index a4d309a8f4..0000000000 --- a/store/v2/trace.go +++ /dev/null @@ -1,23 +0,0 @@ -package store - -import "maps" - -// TraceContext contains KVStore context data. It will be written with every -// trace operation. -type TraceContext map[string]any - -// Clone creates a shallow clone of a TraceContext. -func (tc TraceContext) Clone() TraceContext { - return maps.Clone(tc) -} - -// Merge merges the receiver TraceContext with the provided TraceContext argument. -func (tc TraceContext) Merge(newTc TraceContext) TraceContext { - if tc == nil { - tc = TraceContext{} - } - - maps.Copy(tc, newTc) - - return tc -} diff --git a/store/v2/validation.go b/store/v2/validation.go deleted file mode 100644 index a495f83b78..0000000000 --- a/store/v2/validation.go +++ /dev/null @@ -1,33 +0,0 @@ -package store - -var ( - // MaxKeyLength is the maximum allowed length for a key in bytes. - // It is set to 128K - 1 (131,071 bytes). - MaxKeyLength = (1 << 17) - 1 - - // MaxValueLength is the maximum allowed length for a value in bytes. - // It is set to 2G - 1 (2,147,483,647 bytes). - MaxValueLength = (1 << 31) - 1 -) - -// AssertValidKey checks if the key is valid, i.e. key is not nil, not empty and -// within length limit. -func AssertValidKey(key []byte) { - if len(key) == 0 { - panic("key is nil or empty") - } - if len(key) > MaxKeyLength { - panic("key is too large") - } -} - -// AssertValidValue checks if the value is valid, i.e. value is not nil and -// within length limit. 
-func AssertValidValue(value []byte) { - if value == nil { - panic("value is nil") - } - if len(value) > MaxValueLength { - panic("value is too large") - } -} diff --git a/store/wrapper/wrapper.go b/store/wrapper/wrapper.go deleted file mode 100644 index 33b195e706..0000000000 --- a/store/wrapper/wrapper.go +++ /dev/null @@ -1,34 +0,0 @@ -package wrapper - -import ( - dbm "github.com/cosmos/cosmos-db" - idb "github.com/cosmos/iavl/db" -) - -var _ idb.DB = &DBWrapper{} - -// DBWrapper is a simple wrapper of dbm.DB that implements the iavl.DB interface. -type DBWrapper struct { - dbm.DB -} - -// NewDBWrapper creates a new DBWrapper instance. -func NewDBWrapper(db dbm.DB) *DBWrapper { - return &DBWrapper{db} -} - -func (dbw *DBWrapper) NewBatch() idb.Batch { - return dbw.DB.NewBatch() -} - -func (dbw *DBWrapper) NewBatchWithSize(size int) idb.Batch { - return dbw.DB.NewBatchWithSize(size) -} - -func (dbw *DBWrapper) Iterator(start, end []byte) (idb.Iterator, error) { - return dbw.DB.Iterator(start, end) -} - -func (dbw *DBWrapper) ReverseIterator(start, end []byte) (idb.Iterator, error) { - return dbw.DB.ReverseIterator(start, end) -} diff --git a/tests/go.mod b/tests/go.mod index 59bdc284c6..d9922c43fc 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/log v1.3.1 cosmossdk.io/math v1.3.0 cosmossdk.io/simapp v0.0.0-20230309163709-87da587416ba - cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc + cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // main cosmossdk.io/x/evidence v0.0.0-20230613133644-0a778132a60f cosmossdk.io/x/feegrant v0.0.0-20230613133644-0a778132a60f cosmossdk.io/x/nft v0.0.0-20230613133644-0a778132a60f @@ -22,7 +22,7 @@ require ( github.com/cosmos/cosmos-db v1.0.2 github.com/cosmos/cosmos-proto v1.0.0-beta.5 // this version is not used as it is always replaced by the latest Cosmos SDK version - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/spf13/cobra v1.8.1 @@ -241,7 +241,6 @@ replace ( cosmossdk.io/collections => ../collections cosmossdk.io/core => ../core cosmossdk.io/core/testing => ../core/testing - cosmossdk.io/store => ../store cosmossdk.io/x/accounts => ../x/accounts cosmossdk.io/x/accounts/defaults/lockup => ../x/accounts/defaults/lockup cosmossdk.io/x/accounts/defaults/multisig => ../x/accounts/defaults/multisig diff --git a/tests/go.sum b/tests/go.sum index 581fde5445..2b1565dbf6 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -202,6 +202,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc h1:R9O9d75e0qZYUsVV0zzi+D7cNLnX2JrUOQNoIPaF0Bg= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc/go.mod h1:amTTatOUV3u1PsKmNb87z6/galCxrRbz9kRdJkL0DyU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= diff --git a/x/accounts/defaults/lockup/go.mod b/x/accounts/defaults/lockup/go.mod index 8326bca9eb..c629f932fe 100644 --- a/x/accounts/defaults/lockup/go.mod +++ 
b/x/accounts/defaults/lockup/go.mod @@ -9,7 +9,7 @@ require ( cosmossdk.io/x/bank v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/distribution v0.0.0-00010101000000-000000000000 cosmossdk.io/x/staking v0.0.0-00010101000000-000000000000 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 ) diff --git a/x/accounts/defaults/multisig/go.mod b/x/accounts/defaults/multisig/go.mod index 3b76d8c6ee..e6bcf63a0f 100644 --- a/x/accounts/defaults/multisig/go.mod +++ b/x/accounts/defaults/multisig/go.mod @@ -9,7 +9,7 @@ require ( cosmossdk.io/x/accounts v0.0.0-00010101000000-000000000000 cosmossdk.io/x/bank v0.0.0-20240226161501-23359a0b6d91 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/stretchr/testify v1.9.0 google.golang.org/protobuf v1.34.2 diff --git a/x/accounts/go.mod b/x/accounts/go.mod index 8b86152041..493c4632b9 100644 --- a/x/accounts/go.mod +++ b/x/accounts/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/x/accounts/defaults/multisig v0.0.0-00010101000000-000000000000 cosmossdk.io/x/bank v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/tx v0.13.3 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/spf13/cobra v1.8.1 diff --git a/x/auth/go.mod b/x/auth/go.mod index 496cdad113..749881cb55 100644 --- a/x/auth/go.mod +++ b/x/auth/go.mod @@ -16,7 +16,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc1 github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/golang/mock v1.6.0 diff --git a/x/authz/go.mod b/x/authz/go.mod index 247b6fd1f1..a01994c31d 100644 --- a/x/authz/go.mod +++ b/x/authz/go.mod @@ -8,14 +8,14 @@ require ( cosmossdk.io/depinject v1.0.0 cosmossdk.io/errors v1.0.1 cosmossdk.io/math v1.3.0 - cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc + cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // main cosmossdk.io/x/bank v0.0.0-20240226161501-23359a0b6d91 cosmossdk.io/x/staking v0.0.0-00010101000000-000000000000 cosmossdk.io/x/tx v0.13.3 github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft v1.0.0-rc1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 @@ -182,7 +182,6 @@ replace ( cosmossdk.io/collections => ../../collections cosmossdk.io/core => ../../core cosmossdk.io/core/testing => ../../core/testing - cosmossdk.io/store => ../../store cosmossdk.io/x/accounts => ../accounts cosmossdk.io/x/auth => ../auth cosmossdk.io/x/bank => ../bank diff --git a/x/authz/go.sum b/x/authz/go.sum index 8d4b41eff9..4e26996062 100644 --- a/x/authz/go.sum +++ b/x/authz/go.sum @@ -14,6 +14,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc 
h1:R9O9d75e0qZYUsVV0zzi+D7cNLnX2JrUOQNoIPaF0Bg= +cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc/go.mod h1:amTTatOUV3u1PsKmNb87z6/galCxrRbz9kRdJkL0DyU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= diff --git a/x/bank/go.mod b/x/bank/go.mod index b6b501ce5d..5adad8fffa 100644 --- a/x/bank/go.mod +++ b/x/bank/go.mod @@ -14,7 +14,7 @@ require ( github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cometbft/cometbft v1.0.0-rc1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/circuit/go.mod b/x/circuit/go.mod index 66882b7cb4..d29e68f66a 100644 --- a/x/circuit/go.mod +++ b/x/circuit/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/errors v1.0.1 cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 diff --git a/x/consensus/go.mod b/x/consensus/go.mod index 51170b1e00..f5101d0cec 100644 --- a/x/consensus/go.mod +++ b/x/consensus/go.mod @@ -12,7 +12,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc1 github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 diff --git a/x/distribution/go.mod b/x/distribution/go.mod index 00ba358d49..2df0c64205 100644 --- a/x/distribution/go.mod +++ b/x/distribution/go.mod @@ -16,7 +16,7 @@ require ( cosmossdk.io/x/protocolpool v0.0.0-20230925135524-a1bc045b3190 cosmossdk.io/x/staking v0.0.0-00010101000000-000000000000 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/epochs/go.mod b/x/epochs/go.mod index 6f41f53eb5..6b89038af7 100644 --- a/x/epochs/go.mod +++ b/x/epochs/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/errors v1.0.1 cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 diff --git a/x/evidence/go.mod b/x/evidence/go.mod index a9157cd12a..cf99039675 100644 --- a/x/evidence/go.mod +++ b/x/evidence/go.mod @@ -13,7 +13,7 @@ require ( cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc cosmossdk.io/x/consensus v0.0.0-00010101000000-000000000000 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/feegrant/go.mod b/x/feegrant/go.mod index 0466b29aec..46589a605e 100644 --- a/x/feegrant/go.mod +++ b/x/feegrant/go.mod @@ 
-15,7 +15,7 @@ require ( cosmossdk.io/x/gov v0.0.0-20230925135524-a1bc045b3190 github.com/cometbft/cometbft v1.0.0-rc1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/gov/go.mod b/x/gov/go.mod index d3919425c2..0bdb6ba646 100644 --- a/x/gov/go.mod +++ b/x/gov/go.mod @@ -19,7 +19,7 @@ require ( github.com/chzyer/readline v1.5.1 github.com/cometbft/cometbft v1.0.0-rc1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/group/go.mod b/x/group/go.mod index 2e094a88e9..49193d54da 100644 --- a/x/group/go.mod +++ b/x/group/go.mod @@ -23,7 +23,7 @@ require ( github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-db v1.0.2 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/mint/go.mod b/x/mint/go.mod index bc9afcf5b5..69548c40a3 100644 --- a/x/mint/go.mod +++ b/x/mint/go.mod @@ -15,7 +15,7 @@ require ( cosmossdk.io/x/epochs v0.0.0-20240522060652-a1ae4c3e0337 github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/nft/go.mod b/x/nft/go.mod index 73145f3550..ee56b3001d 100644 --- a/x/nft/go.mod +++ b/x/nft/go.mod @@ -11,7 +11,7 @@ require ( cosmossdk.io/math v1.3.0 cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/params/go.mod b/x/params/go.mod index 9e36db1bf2..636e35bca3 100644 --- a/x/params/go.mod +++ b/x/params/go.mod @@ -14,7 +14,7 @@ require ( github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-db v1.0.2 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/protocolpool/go.mod b/x/protocolpool/go.mod index 6cc5725dda..ddecb0f203 100644 --- a/x/protocolpool/go.mod +++ b/x/protocolpool/go.mod @@ -13,7 +13,7 @@ require ( cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/slashing/go.mod b/x/slashing/go.mod index 4e4909fe67..c6a3243ebd 100644 --- a/x/slashing/go.mod +++ b/x/slashing/go.mod @@ -15,7 +15,7 @@ require ( cosmossdk.io/x/staking v0.0.0-00010101000000-000000000000 github.com/bits-and-blooms/bitset v1.10.0 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 
github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/staking/go.mod b/x/staking/go.mod index 58980ee7b7..2be10f7e22 100644 --- a/x/staking/go.mod +++ b/x/staking/go.mod @@ -14,7 +14,7 @@ require ( github.com/cometbft/cometbft v1.0.0-rc1 github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.4 diff --git a/x/upgrade/go.mod b/x/upgrade/go.mod index 67c1440368..bc22f94ea9 100644 --- a/x/upgrade/go.mod +++ b/x/upgrade/go.mod @@ -9,7 +9,7 @@ require ( cosmossdk.io/depinject v1.0.0 cosmossdk.io/errors v1.0.1 cosmossdk.io/log v1.3.1 - cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc + cosmossdk.io/store v1.1.1-0.20240418092142-896cdf1971bc // main cosmossdk.io/x/auth v0.0.0-00010101000000-000000000000 cosmossdk.io/x/consensus v0.0.0-00010101000000-000000000000 cosmossdk.io/x/gov v0.0.0-20230925135524-a1bc045b3190 @@ -17,7 +17,7 @@ require ( github.com/cometbft/cometbft/api v1.0.0-rc.1 github.com/cosmos/cosmos-db v1.0.2 github.com/cosmos/cosmos-proto v1.0.0-beta.5 - github.com/cosmos/cosmos-sdk v0.51.0 + github.com/cosmos/cosmos-sdk v0.52.0 github.com/cosmos/gogoproto v1.5.0 github.com/golang/protobuf v1.5.4 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -208,7 +208,7 @@ replace ( cosmossdk.io/api => ../../api cosmossdk.io/core => ../../core cosmossdk.io/core/testing => ../../core/testing - cosmossdk.io/store => ../../store + cosmossdk.io/store => cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 // main cosmossdk.io/x/accounts => ../accounts cosmossdk.io/x/auth => ../auth cosmossdk.io/x/bank => ../bank diff --git a/x/upgrade/go.sum b/x/upgrade/go.sum index 5e17696369..c13ed1bbc0 100644 --- a/x/upgrade/go.sum +++ b/x/upgrade/go.sum @@ -204,6 +204,8 @@ cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= cosmossdk.io/schema v0.1.1 h1:I0M6pgI7R10nq+/HCQfbO6BsGBZA8sQy+duR1Y3aKcA= cosmossdk.io/schema v0.1.1/go.mod h1:RDAhxIeNB4bYqAlF4NBJwRrgtnciMcyyg0DOKnhNZQQ= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6 h1:lhyOHcIJU+IB6i5sO36DWC2r4QXDEk/bsno7jrTr28k= +cosmossdk.io/store v1.0.0-rc.0.0.20240731205446-aee9803a0af6/go.mod h1:CY8wAToETz/dmuuKwf/qfXEImtey4jWdWWcoavfQWNw= cosmossdk.io/x/protocolpool v0.0.0-20230925135524-a1bc045b3190 h1:XQJj9Dv9Gtze0l2TF79BU5lkP6MkUveTUuKICmxoz+o= cosmossdk.io/x/protocolpool v0.0.0-20230925135524-a1bc045b3190/go.mod h1:7WUGupOvmlHJoIMBz1JbObQxeo6/TDiuDBxmtod8HRg= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
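For reference, the prefix-to-range computation removed with store/v2/storage/util/iterator.go can be sketched in isolation. `prefixRange` below is a hypothetical stand-in for the deleted IterateWithPrefix (covering only the nil begin/end case), and `copyIncr` mirrors the deleted CopyIncr: treat the prefix as a big-endian counter and increment it with carry, using the result as the exclusive end of the iteration domain:

```go
package main

import (
	"bytes"
	"fmt"
)

// prefixRange maps a key prefix to the [begin, end) byte range an iterator
// should walk so that it visits exactly the keys carrying that prefix.
func prefixRange(prefix []byte) (begin, end []byte) {
	begin = append([]byte{}, prefix...)
	end = copyIncr(prefix)
	return begin, end
}

// copyIncr increments the slice as a big-endian counter, carrying through
// 0xFF bytes. It returns nil when no finite upper bound exists (all bytes
// are 0xFF); the deleted version panics on empty input, while this sketch
// returns nil there as well.
func copyIncr(bz []byte) []byte {
	ret := append([]byte{}, bz...)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] < 0xFF {
			ret[i]++
			return ret
		}
		ret[i] = 0x00
	}
	return nil
}

func main() {
	begin, end := prefixRange([]byte("key01"))
	fmt.Printf("begin=%q end=%q\n", begin, end) // begin="key01" end="key02"

	// every key carrying the prefix sorts inside [begin, end)
	fmt.Println(bytes.Compare([]byte("key010"), begin) >= 0) // true
	fmt.Println(bytes.Compare([]byte("key019"), end) < 0)    // true
}
```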