test(system): Introduce system tests - initial version heavily inspired by wasmd (#20013)
This commit is contained in:
parent
abb2994a8f
commit
3d1925d476
2
.github/workflows/gosec.yml
vendored
2
.github/workflows/gosec.yml
vendored
@ -36,7 +36,7 @@ jobs:
|
||||
uses: securego/gosec@master
|
||||
with:
|
||||
# we let the report trigger content trigger a failure using the GitHub Security features.
|
||||
args: "-exclude=G101,G107 -no-fail -fmt sarif -out results.sarif ./..."
|
||||
args: "-exclude=G101,G107 -exclude-dir=systemtests -no-fail -fmt sarif -out results.sarif ./..."
|
||||
|
||||
- name: Upload SARIF file
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
|
||||
53
.github/workflows/test.yml
vendored
53
.github/workflows/test.yml
vendored
@ -144,6 +144,57 @@ jobs:
|
||||
name: "${{ github.sha }}-e2e-coverage"
|
||||
path: ./tests/e2e-profile.out
|
||||
|
||||
test-system:
|
||||
needs: [tests, test-integration, test-e2e]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-tags: true
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: "1.22"
|
||||
check-latest: true
|
||||
cache: true
|
||||
cache-dependency-path: |
|
||||
simapp/go.sum
|
||||
systemtests/go.sum
|
||||
- uses: technote-space/get-diff-action@v6.1.2
|
||||
id: git_diff
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
**/go.mod
|
||||
**/go.sum
|
||||
**/Makefile
|
||||
Makefile
|
||||
- name: Install musl lib for simd (docker) binary
|
||||
if: env.GIT_DIFF
|
||||
run: |
|
||||
sudo apt-get install -y musl
|
||||
- name: system tests v1
|
||||
if: env.GIT_DIFF
|
||||
run: |
|
||||
COSMOS_BUILD_OPTIONS=legacy make test-system
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: "testnet-setup"
|
||||
path: ./systemtests/testnet/
|
||||
retention-days: 3
|
||||
# - name: system tests v2
|
||||
# if: env.GIT_DIFF
|
||||
# run: |
|
||||
# make test-system
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: failure()
|
||||
with:
|
||||
name: "testnet-setup-v2"
|
||||
path: ./systemtests/testnet/
|
||||
retention-days: 3
|
||||
|
||||
repo-analysis:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [tests, test-integration, test-e2e]
|
||||
@ -1245,7 +1296,7 @@ jobs:
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
with:
|
||||
projectBaseDir: x/mint/
|
||||
|
||||
|
||||
test-x-epochs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
@ -41,7 +41,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
|
||||
Every module contains its own CHANGELOG.md. Please refer to the module you are interested in.
|
||||
|
||||
### Features
|
||||
|
||||
* (tests) [#20013](https://github.com/cosmos/cosmos-sdk/pull/20013) Introduce system tests to run multi node local testnet in CI
|
||||
* (runtime) [#19953](https://github.com/cosmos/cosmos-sdk/pull/19953) Implement `core/transaction.Service` in runtime.
|
||||
* (client) [#19905](https://github.com/cosmos/cosmos-sdk/pull/19905) Add grpc client config to `client.toml`.
|
||||
* (runtime) [#19571](https://github.com/cosmos/cosmos-sdk/pull/19571) Implement `core/router.Service` in runtime. This service is present in all modules (when using depinject).
|
||||
|
||||
11
Makefile
11
Makefile
@ -13,6 +13,10 @@ HTTPS_GIT := https://github.com/cosmos/cosmos-sdk.git
|
||||
DOCKER := $(shell which docker)
|
||||
PROJECT_NAME = $(shell git remote get-url origin | xargs basename -s .git)
|
||||
|
||||
ifeq ($(findstring .,$(VERSION)),)
|
||||
VERSION := 0.0.0
|
||||
endif
|
||||
|
||||
# process build tags
|
||||
build_tags = netgo
|
||||
ifeq ($(LEDGER_ENABLED),true)
|
||||
@ -214,6 +218,13 @@ test-integration-cov:
|
||||
#? test-all: Run all test
|
||||
test-all: test-unit test-e2e test-integration test-ledger-mock test-race
|
||||
|
||||
.PHONY: test-system
|
||||
test-system: build
|
||||
mkdir -p ./systemtests/binaries/
|
||||
cp $(BUILDDIR)/simd ./systemtests/binaries/
|
||||
$(MAKE) -C systemtests test
|
||||
|
||||
|
||||
TEST_PACKAGES=./...
|
||||
TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger test-race
|
||||
|
||||
|
||||
2
systemtests/.gitignore
vendored
Normal file
2
systemtests/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/testnet
|
||||
/binaries
|
||||
15
systemtests/Makefile
Normal file
15
systemtests/Makefile
Normal file
@ -0,0 +1,15 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
WAIT_TIME ?= 45s
|
||||
|
||||
all: test
|
||||
|
||||
test:
|
||||
go test -mod=readonly -failfast -tags='system_test' ./... --wait-time=$(WAIT_TIME) --verbose
|
||||
|
||||
format:
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/lcd/statik/statik.go" | xargs gofumpt -w
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "*.git*" -not -path "./client/lcd/statik/statik.go" | xargs misspell -w
|
||||
find . -name '*.go' -type f -not -path "./vendor*" -not -path "./tests/system/vendor*" -not -path "*.git*" -not -path "./client/lcd/statik/statik.go" | xargs gci write --skip-generated -s standard -s default -s "prefix(cosmossdk.io)" -s "prefix(github.com/cosmos/cosmos-sdk)" --custom-order
|
||||
|
||||
.PHONY: all test format
|
||||
50
systemtests/README.md
Normal file
50
systemtests/README.md
Normal file
@ -0,0 +1,50 @@
|
||||
# Testing
|
||||
|
||||
Test framework for system tests.
|
||||
Starts and interacts with a (multi node) blockchain in Go.
|
||||
Supports
|
||||
* CLI
|
||||
* Servers
|
||||
* Events
|
||||
* RPC
|
||||
|
||||
Uses:
|
||||
* testify
|
||||
* gjson
|
||||
* sjson
|
||||
Server and client side are executed on the host machine
|
||||
|
||||
## Developer
|
||||
### Test strategy
|
||||
System tests cover the full stack via CLI and a running (multi node) network. They are more expensive (in terms of time/CPU)
|
||||
to run compared to unit or integration tests.
|
||||
Therefore, we focus on the **critical path** and do not cover every condition.
|
||||
|
||||
### Execute a single test
|
||||
|
||||
```sh
|
||||
go test -tags system_test -count=1 -v . --run TestStakeUnstake -verbose
|
||||
```
|
||||
|
||||
Test cli parameters
|
||||
|
||||
* `-verbose` verbose output
|
||||
* `-wait-time` duration - time to wait for chain events (default 30s)
|
||||
* `-nodes-count` int - number of nodes in the cluster (default 4)
|
||||
|
||||
# Port ranges
|
||||
With *n* nodes:
|
||||
* `26657` - `26657+n` - RPC
|
||||
* `1317` - `1317+n` - API
|
||||
* `9090` - `9090+n` - GRPC
|
||||
* `16656` - `16656+n` - P2P
|
||||
|
||||
For example Node *3* listens on `26660` for RPC calls
|
||||
|
||||
## Resources
|
||||
|
||||
* [gjson query syntax](https://github.com/tidwall/gjson#path-syntax)
|
||||
|
||||
## Disclaimer
|
||||
|
||||
This is based on the system test framework in [wasmd](https://github.com/CosmWasm/wasmd) built by Confio.
|
||||
458
systemtests/cli.go
Normal file
458
systemtests/cli.go
Normal file
@ -0,0 +1,458 @@
|
||||
package systemtests
|
||||
|
||||
import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tidwall/gjson"
	"golang.org/x/exp/slices"

	"github.com/cosmos/cosmos-sdk/client/grpc/cmtservice"
	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
	"github.com/cosmos/cosmos-sdk/std"
	sdk "github.com/cosmos/cosmos-sdk/types"
)
|
||||
|
||||
type (
	// awaitNextBlock blocks until the next block is minted and returns its height.
	awaitNextBlock func(t *testing.T, timeout ...time.Duration) int64
	// RunErrorAssert is a custom type that is satisfied by testify matchers as well
	RunErrorAssert func(t assert.TestingT, err error, msgAndArgs ...interface{}) (ok bool)
)

// CLIWrapper provides a more convenient way to interact with the CLI binary from the Go tests
type CLIWrapper struct {
	t              *testing.T
	nodeAddress    string // RPC endpoint, passed to every command via --node
	chainID        string
	homeDir        string // keyring/config home, passed via --home
	fees           string // default fee appended to txs unless the caller sets --fees
	Debug          bool   // when true, executed commands are logged
	assertErrorFn  RunErrorAssert // applied to every command's process error
	awaitNextBlock awaitNextBlock
	expTXCommitted bool // when true, Run requires the tx to be committed on chain
	execBinary     string
	nodesCount     int
}
|
||||
|
||||
// NewCLIWrapper constructor with sane defaults: a fee of 1 bond denom token,
// assert.NoError on process errors, and waiting for tx commit.
func NewCLIWrapper(t *testing.T, sut *SystemUnderTest, verbose bool) *CLIWrapper {
	t.Helper()
	return NewCLIWrapperX(
		t,
		sut.execBinary,
		sut.rpcAddr,
		sut.chainID,
		sut.AwaitNextBlock,
		sut.nodesCount,
		filepath.Join(WorkDir, sut.outputDir), // home dir is the testnet output dir
		"1"+sdk.DefaultBondDenom,              // default fee
		verbose,                               // debug: log executed commands
		assert.NoError,                        // fail test on any run error
		true,                                  // expect txs to be committed
	)
}
|
||||
|
||||
// NewCLIWrapperX extended constructor. Fails the test when execBinary is blank.
//
// Parameters:
//   - execBinary: name of the chain binary to execute
//   - nodeAddress: RPC endpoint, passed to every command via --node
//   - awaiter: callback that blocks until the next block is minted
//   - homeDir: keyring/config home, passed via --home
//   - fees: default fee appended to txs unless the caller sets --fees
//   - assertErrorFn: assertion applied to every command's process error
//   - expTXCommitted: when true, Run requires the tx to be committed on chain
func NewCLIWrapperX(
	t *testing.T,
	execBinary string,
	nodeAddress string,
	chainID string,
	awaiter awaitNextBlock,
	nodesCount int,
	homeDir string,
	fees string,
	debug bool,
	assertErrorFn RunErrorAssert,
	expTXCommitted bool,
) *CLIWrapper {
	t.Helper()
	if strings.TrimSpace(execBinary) == "" {
		t.Fatal("name of executable binary must not be empty")
	}
	return &CLIWrapper{
		t:              t,
		execBinary:     execBinary,
		nodeAddress:    nodeAddress,
		chainID:        chainID,
		homeDir:        homeDir,
		Debug:          debug,
		awaitNextBlock: awaiter,
		nodesCount:     nodesCount,
		fees:           fees,
		assertErrorFn:  assertErrorFn,
		expTXCommitted: expTXCommitted,
	}
}
|
||||
|
||||
// WithRunErrorsIgnored does not fail on any error
|
||||
func (c CLIWrapper) WithRunErrorsIgnored() CLIWrapper {
|
||||
return c.WithRunErrorMatcher(func(t assert.TestingT, err error, msgAndArgs ...interface{}) bool {
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// WithRunErrorMatcher assert function to ensure run command error value
|
||||
func (c CLIWrapper) WithRunErrorMatcher(f RunErrorAssert) CLIWrapper {
|
||||
return *NewCLIWrapperX(
|
||||
c.t,
|
||||
c.execBinary,
|
||||
c.nodeAddress,
|
||||
c.chainID,
|
||||
c.awaitNextBlock,
|
||||
c.nodesCount,
|
||||
c.homeDir,
|
||||
c.fees,
|
||||
c.Debug,
|
||||
f,
|
||||
c.expTXCommitted,
|
||||
)
|
||||
}
|
||||
|
||||
func (c CLIWrapper) WithNodeAddress(nodeAddr string) CLIWrapper {
|
||||
return *NewCLIWrapperX(
|
||||
c.t,
|
||||
c.execBinary,
|
||||
nodeAddr,
|
||||
c.chainID,
|
||||
c.awaitNextBlock,
|
||||
c.nodesCount,
|
||||
c.homeDir,
|
||||
c.fees,
|
||||
c.Debug,
|
||||
c.assertErrorFn,
|
||||
c.expTXCommitted,
|
||||
)
|
||||
}
|
||||
|
||||
func (c CLIWrapper) WithAssertTXUncommitted() CLIWrapper {
|
||||
return *NewCLIWrapperX(
|
||||
c.t,
|
||||
c.execBinary,
|
||||
c.nodeAddress,
|
||||
c.chainID,
|
||||
c.awaitNextBlock,
|
||||
c.nodesCount,
|
||||
c.homeDir,
|
||||
c.fees,
|
||||
c.Debug,
|
||||
c.assertErrorFn,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Run main entry for executing cli commands that submit txs.
// A default fee flag is appended unless the caller already supplied --fees.
// When configured (expTXCommitted), the method blocks until the tx is committed.
func (c CLIWrapper) Run(args ...string) string {
	if c.fees != "" && !slices.ContainsFunc(args, func(s string) bool {
		return strings.HasPrefix(s, "--fees")
	}) {
		args = append(args, "--fees="+c.fees) // add default fee
	}
	args = c.withTXFlags(args...)
	execOutput, ok := c.run(args)
	if !ok {
		// process error accepted by the configured matcher: return raw output as-is
		return execOutput
	}
	rsp, committed := c.awaitTxCommitted(execOutput, DefaultWaitTime)
	c.t.Logf("tx committed: %v", committed)
	require.Equal(c.t, c.expTXCommitted, committed, "expected tx committed: %v", c.expTXCommitted)
	return rsp
}
|
||||
|
||||
// awaitTxCommitted waits for the tx to be committed on chain. The submit
// response must contain a success code and a txhash. Returns the tx query
// result and true when found within 3 blocks, otherwise ("", false).
func (c CLIWrapper) awaitTxCommitted(submitResp string, timeout ...time.Duration) (string, bool) {
	RequireTxSuccess(c.t, submitResp)
	txHash := gjson.Get(submitResp, "txhash")
	require.True(c.t, txHash.Exists())
	var txResult string
	for i := 0; i < 3; i++ { // max blocks to wait for a commit
		// query errors are ignored: the tx may simply not be in a block yet
		txResult = c.WithRunErrorsIgnored().CustomQuery("q", "tx", txHash.String())
		if code := gjson.Get(txResult, "code"); code.Exists() {
			if code.Int() != 0 { // 0 = success code
				c.t.Logf("+++ got error response code: %s\n", txResult)
			}
			return txResult, true
		}
		c.awaitNextBlock(c.t, timeout...)
	}
	return "", false
}
|
||||
|
||||
// Keys runs the binary's `keys` CLI subcommand with the test keyring flags
// applied. Process errors are judged by the configured error matcher; the
// ok flag is discarded, so callers must inspect the returned output.
func (c CLIWrapper) Keys(args ...string) string {
	args = c.withKeyringFlags(args...)
	out, _ := c.run(args)
	return out
}

// CustomQuery is the main entrypoint for CLI queries. Returns the raw
// (JSON) output of the command.
func (c CLIWrapper) CustomQuery(args ...string) string {
	args = c.withQueryFlags(args...)
	out, _ := c.run(args)
	return out
}
|
||||
|
||||
// run executes the binary with the given args. ok reflects the configured
// error matcher's verdict on the process error.
func (c CLIWrapper) run(args []string) (output string, ok bool) {
	return c.runWithInput(args, nil)
}

// runWithInput executes the binary with the given args and stdin source.
// Panics raised while launching the process are recovered and converted into
// errors so the configured assertErrorFn can judge them like any other failure.
func (c CLIWrapper) runWithInput(args []string, input io.Reader) (output string, ok bool) {
	if c.Debug {
		c.t.Logf("+++ running `%s %s`", c.execBinary, strings.Join(args, " "))
	}
	gotOut, gotErr := func() (out []byte, err error) {
		defer func() {
			if r := recover(); r != nil {
				err = fmt.Errorf("recovered from panic: %v", r)
			}
		}()
		cmd := exec.Command(locateExecutable(c.execBinary), args...) //nolint:gosec // test code only
		cmd.Dir = WorkDir
		cmd.Stdin = input
		// CombinedOutput captures stdout and stderr interleaved
		return cmd.CombinedOutput()
	}()
	ok = c.assertErrorFn(c.t, gotErr, string(gotOut))
	return strings.TrimSpace(string(gotOut)), ok
}
|
||||
|
||||
func (c CLIWrapper) withQueryFlags(args ...string) []string {
|
||||
args = append(args, "--output", "json")
|
||||
return c.withChainFlags(args...)
|
||||
}
|
||||
|
||||
func (c CLIWrapper) withTXFlags(args ...string) []string {
|
||||
args = append(args,
|
||||
"--broadcast-mode", "sync",
|
||||
"--output", "json",
|
||||
"--yes",
|
||||
"--chain-id", c.chainID,
|
||||
)
|
||||
args = c.withKeyringFlags(args...)
|
||||
return c.withChainFlags(args...)
|
||||
}
|
||||
|
||||
func (c CLIWrapper) withKeyringFlags(args ...string) []string {
|
||||
r := append(args,
|
||||
"--home", c.homeDir,
|
||||
"--keyring-backend", "test",
|
||||
)
|
||||
for _, v := range args {
|
||||
if v == "-a" || v == "--address" { // show address only
|
||||
return r
|
||||
}
|
||||
}
|
||||
return append(r, "--output", "json")
|
||||
}
|
||||
|
||||
func (c CLIWrapper) withChainFlags(args ...string) []string {
|
||||
return append(args,
|
||||
"--node", c.nodeAddress,
|
||||
)
|
||||
}
|
||||
|
||||
// WasmExecute sends a MsgExecute to a CosmWasm contract via `tx wasm execute`.
// NOTE(review): leftover from the wasmd origin of this framework — the plain
// cosmos-sdk simd binary has no `tx wasm` command; confirm whether this
// method should be removed here.
func (c CLIWrapper) WasmExecute(contractAddr, msg, from string, args ...string) string {
	cmd := append([]string{"tx", "wasm", "execute", contractAddr, msg, "--from", from}, args...)
	return c.Run(cmd...)
}
|
||||
|
||||
// AddKey add key to default keyring. Returns address
|
||||
func (c CLIWrapper) AddKey(name string) string {
|
||||
cmd := c.withKeyringFlags("keys", "add", name, "--no-backup")
|
||||
out, _ := c.run(cmd)
|
||||
addr := gjson.Get(out, "address").String()
|
||||
require.NotEmpty(c.t, addr, "got %q", out)
|
||||
return addr
|
||||
}
|
||||
|
||||
// AddKeyFromSeed recovers the key from given seed and add it to default keyring. Returns address
|
||||
func (c CLIWrapper) AddKeyFromSeed(name, mnemoic string) string {
|
||||
cmd := c.withKeyringFlags("keys", "add", name, "--recover")
|
||||
out, _ := c.runWithInput(cmd, strings.NewReader(mnemoic))
|
||||
addr := gjson.Get(out, "address").String()
|
||||
require.NotEmpty(c.t, addr, "got %q", out)
|
||||
return addr
|
||||
}
|
||||
|
||||
// GetKeyAddr returns address
|
||||
func (c CLIWrapper) GetKeyAddr(name string) string {
|
||||
cmd := c.withKeyringFlags("keys", "show", name, "-a")
|
||||
out, _ := c.run(cmd)
|
||||
addr := strings.Trim(out, "\n")
|
||||
require.NotEmpty(c.t, addr, "got %q", out)
|
||||
return addr
|
||||
}
|
||||
|
||||
// defaultSrcAddr is the key name of the account funding txs by default (first validator node).
const defaultSrcAddr = "node0"

// FundAddress sends the token amount from the default source account to the
// destination address and requires the tx to succeed. Returns the raw tx response.
func (c CLIWrapper) FundAddress(destAddr, amount string) string {
	require.NotEmpty(c.t, destAddr)
	require.NotEmpty(c.t, amount)
	cmd := []string{"tx", "bank", "send", defaultSrcAddr, destAddr, amount}
	rsp := c.Run(cmd...)
	RequireTxSuccess(c.t, rsp)
	return rsp
}
|
||||
|
||||
// QueryBalances queries all balances for an account. Returns json response
// Example:`{"balances":[{"denom":"node0token","amount":"1000000000"},{"denom":"stake","amount":"400000003"}],"pagination":{}}`
func (c CLIWrapper) QueryBalances(addr string) string {
	return c.CustomQuery("q", "bank", "balances", addr)
}

// QueryBalance returns balance amount for given denom.
// 0 when not found
func (c CLIWrapper) QueryBalance(addr, denom string) int64 {
	raw := c.CustomQuery("q", "bank", "balance", addr, denom)
	require.Contains(c.t, raw, "amount", raw)
	return gjson.Get(raw, "balance.amount").Int()
}

// QueryTotalSupply returns total amount of tokens for a given denom.
// 0 when not found
func (c CLIWrapper) QueryTotalSupply(denom string) int64 {
	// queries the full supply, then picks the entry matching the denom
	raw := c.CustomQuery("q", "bank", "total-supply")
	require.Contains(c.t, raw, "amount", raw)
	return gjson.Get(raw, fmt.Sprintf("supply.#(denom==%q).amount", denom)).Int()
}
|
||||
|
||||
// GetCometBFTValidatorSet queries the latest CometBFT validator set via the CLI
// and unmarshals the response with the legacy amino codec.
func (c CLIWrapper) GetCometBFTValidatorSet() cmtservice.GetLatestValidatorSetResponse {
	args := []string{"q", "comet-validator-set"}
	got := c.CustomQuery(args...)

	// still using amino here as the SDK
	amino := codec.NewLegacyAmino()
	std.RegisterLegacyAminoCodec(amino)
	// NOTE(review): this interface registry is created and immediately
	// discarded, so the call appears to have no effect — confirm and remove.
	std.RegisterInterfaces(codectypes.NewInterfaceRegistry())

	var res cmtservice.GetLatestValidatorSetResponse
	require.NoError(c.t, amino.UnmarshalJSON([]byte(got), &res), got)
	return res
}
|
||||
|
||||
// IsInCometBftValset returns true when the given pub key is in the current active tendermint validator set
|
||||
func (c CLIWrapper) IsInCometBftValset(valPubKey cryptotypes.PubKey) (cmtservice.GetLatestValidatorSetResponse, bool) {
|
||||
valResult := c.GetCometBFTValidatorSet()
|
||||
var found bool
|
||||
for _, v := range valResult.Validators {
|
||||
if v.PubKey.Equal(valPubKey) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return valResult, found
|
||||
}
|
||||
|
||||
// SubmitGovProposal submit a gov v1 proposal
|
||||
func (c CLIWrapper) SubmitGovProposal(proposalJson string, args ...string) string {
|
||||
if len(args) == 0 {
|
||||
args = []string{"--from=" + defaultSrcAddr}
|
||||
}
|
||||
|
||||
pathToProposal := filepath.Join(c.t.TempDir(), "proposal.json")
|
||||
err := os.WriteFile(pathToProposal, []byte(proposalJson), os.FileMode(0o744))
|
||||
require.NoError(c.t, err)
|
||||
c.t.Log("Submit upgrade proposal")
|
||||
return c.Run(append([]string{"tx", "gov", "submit-proposal", pathToProposal}, args...)...)
|
||||
}
|
||||
|
||||
// SubmitAndVoteGovProposal submit proposal, let all validators vote yes and return proposal id
|
||||
func (c CLIWrapper) SubmitAndVoteGovProposal(proposalJson string, args ...string) string {
|
||||
rsp := c.SubmitGovProposal(proposalJson, args...)
|
||||
RequireTxSuccess(c.t, rsp)
|
||||
raw := c.CustomQuery("q", "gov", "proposals", "--depositor", c.GetKeyAddr(defaultSrcAddr))
|
||||
proposals := gjson.Get(raw, "proposals.#.id").Array()
|
||||
require.NotEmpty(c.t, proposals, raw)
|
||||
ourProposalID := proposals[len(proposals)-1].String() // last is ours
|
||||
for i := 0; i < c.nodesCount; i++ {
|
||||
go func(i int) { // do parallel
|
||||
c.t.Logf("Voting: validator %d\n", i)
|
||||
rsp = c.Run("tx", "gov", "vote", ourProposalID, "yes", "--from", c.GetKeyAddr(fmt.Sprintf("node%d", i)))
|
||||
RequireTxSuccess(c.t, rsp)
|
||||
}(i)
|
||||
}
|
||||
return ourProposalID
|
||||
}
|
||||
|
||||
// Version returns the current version of the client binary. Fails the test
// when the command cannot be executed successfully.
func (c CLIWrapper) Version() string {
	v, ok := c.run([]string{"version"})
	require.True(c.t, ok)
	return v
}
|
||||
|
||||
// RequireTxSuccess require the received response to contain the success code (0)
func RequireTxSuccess(t *testing.T, got string) {
	t.Helper()
	code, details := parseResultCode(t, got)
	require.Equal(t, int64(0), code, "non success tx code : %s", details)
}

// RequireTxFailure require the received response to contain any failure code
// and all of the passed messages within the details.
func RequireTxFailure(t *testing.T, got string, containsMsgs ...string) {
	t.Helper()
	code, details := parseResultCode(t, got)
	require.NotEqual(t, int64(0), code, details)
	for _, msg := range containsMsgs {
		require.Contains(t, details, msg)
	}
}

// parseResultCode extracts the result code and a details string from a tx
// response. Details are the raw_log when present, otherwise the full response.
func parseResultCode(t *testing.T, got string) (int64, string) {
	t.Helper()
	code := gjson.Get(got, "code")
	require.True(t, code.Exists(), "got response: %s", got)

	details := got
	if log := gjson.Get(got, "raw_log"); log.Exists() {
		details = log.String()
	}
	return code.Int(), details
}
|
||||
|
||||
var (
	// ErrOutOfGasMatcher requires error with "out of gas" message
	ErrOutOfGasMatcher RunErrorAssert = func(t assert.TestingT, err error, args ...interface{}) bool {
		const oogMsg = "out of gas"
		return expErrWithMsg(t, err, args, oogMsg)
	}
	// ErrTimeoutMatcher requires time out message
	ErrTimeoutMatcher RunErrorAssert = func(t assert.TestingT, err error, args ...interface{}) bool {
		const expMsg = "timed out waiting for tx to be included in a block"
		return expErrWithMsg(t, err, args, expMsg)
	}
	// ErrPostFailedMatcher requires post failed
	ErrPostFailedMatcher RunErrorAssert = func(t assert.TestingT, err error, args ...interface{}) bool {
		const expMsg = "post failed"
		return expErrWithMsg(t, err, args, expMsg)
	}
)

// expErrWithMsg asserts that err is set and that one of the args values
// (typically the captured process output) contains expMsg. It ALWAYS returns
// false so that callers (see CLIWrapper.run/Run) abort further processing of
// the command output.
func expErrWithMsg(t assert.TestingT, err error, args []interface{}, expMsg string) bool {
	if ok := assert.Error(t, err, args); !ok {
		return false
	}
	var found bool
	for _, v := range args {
		if strings.Contains(fmt.Sprintf("%s", v), expMsg) {
			found = true
			break
		}
	}
	assert.True(t, found, "expected %q but got: %s", expMsg, args)
	return false // always abort
}
|
||||
46
systemtests/genesis_io.go
Normal file
46
systemtests/genesis_io.go
Normal file
@ -0,0 +1,46 @@
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tidwall/gjson"
|
||||
"github.com/tidwall/sjson"
|
||||
|
||||
sdkmath "cosmossdk.io/math"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
// SetConsensusMaxGas max gas that can be consumed in a block
|
||||
func SetConsensusMaxGas(t *testing.T, max int) GenesisMutator {
|
||||
t.Helper()
|
||||
return func(genesis []byte) []byte {
|
||||
state, err := sjson.SetRawBytes(genesis, "consensus.params.block.max_gas", []byte(fmt.Sprintf(`"%d"`, max)))
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
}
|
||||
|
||||
func SetGovVotingPeriod(t *testing.T, period time.Duration) GenesisMutator {
|
||||
t.Helper()
|
||||
return func(genesis []byte) []byte {
|
||||
state, err := sjson.SetRawBytes(genesis, "app_state.gov.params.voting_period", []byte(fmt.Sprintf("%q", period.String())))
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
}
|
||||
|
||||
// GetGenesisBalance return the balance amount for an address from the given genesis json
|
||||
func GetGenesisBalance(rawGenesis []byte, addr string) sdk.Coins {
|
||||
var r []sdk.Coin
|
||||
balances := gjson.GetBytes(rawGenesis, fmt.Sprintf(`app_state.bank.balances.#[address==%q]#.coins`, addr)).Array()
|
||||
for _, coins := range balances {
|
||||
for _, coin := range coins.Array() {
|
||||
r = append(r, sdk.NewCoin(coin.Get("denom").String(), sdkmath.NewInt(coin.Get("amount").Int())))
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
162
systemtests/go.mod
Normal file
162
systemtests/go.mod
Normal file
@ -0,0 +1,162 @@
|
||||
module cosmossdk.io/systemtests
|
||||
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/cosmos/cosmos-proto v1.0.0-beta.4 // indirect
|
||||
github.com/cosmos/cosmos-sdk v0.50.5
|
||||
github.com/cosmos/gogogateway v1.2.0 // indirect
|
||||
github.com/cosmos/gogoproto v1.4.12 // indirect
|
||||
github.com/cosmos/iavl v1.0.1 // indirect
|
||||
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.18.0 // indirect
|
||||
github.com/spf13/cast v1.6.0 // indirect
|
||||
github.com/spf13/cobra v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/grpc v1.63.2 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
cosmossdk.io/math v1.3.0
|
||||
github.com/cometbft/cometbft v0.38.5
|
||||
github.com/creachadair/tomledit v0.0.26
|
||||
github.com/tidwall/gjson v1.14.2
|
||||
github.com/tidwall/sjson v1.2.5
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
|
||||
)
|
||||
|
||||
require (
|
||||
cosmossdk.io/api v0.7.4 // indirect
|
||||
cosmossdk.io/collections v0.4.0 // indirect
|
||||
cosmossdk.io/core v0.11.0 // indirect
|
||||
cosmossdk.io/depinject v1.0.0-alpha.4 // indirect
|
||||
cosmossdk.io/errors v1.0.1 // indirect
|
||||
cosmossdk.io/log v1.3.1 // indirect
|
||||
cosmossdk.io/store v1.0.2 // indirect
|
||||
cosmossdk.io/x/tx v0.13.3-0.20240419091757-db5906b1e894 // indirect
|
||||
filippo.io/edwards25519 v1.0.0 // indirect
|
||||
github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
|
||||
github.com/99designs/keyring v1.2.1 // indirect
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible // indirect
|
||||
github.com/DataDog/zstd v1.5.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cockroachdb/errors v1.11.1 // indirect
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
|
||||
github.com/cockroachdb/pebble v1.1.0 // indirect
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
|
||||
github.com/cometbft/cometbft-db v0.9.1 // indirect
|
||||
github.com/cosmos/btcutil v1.0.5 // indirect
|
||||
github.com/cosmos/cosmos-db v1.0.2 // indirect
|
||||
github.com/cosmos/go-bip39 v1.0.0 // indirect
|
||||
github.com/cosmos/ics23/go v0.10.0 // indirect
|
||||
github.com/cosmos/ledger-cosmos-go v0.13.3 // indirect
|
||||
github.com/danieljoos/wincred v1.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
|
||||
github.com/dgraph-io/badger/v2 v2.2007.4 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.1 // indirect
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emicklei/dot v1.6.1 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/getsentry/sentry-go v0.27.0 // indirect
|
||||
github.com/go-kit/kit v0.12.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
|
||||
github.com/gogo/googleapis v1.4.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.2.0 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/orderedcode v0.0.1 // indirect
|
||||
github.com/gorilla/handlers v1.5.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
|
||||
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
|
||||
github.com/hashicorp/go-hclog v1.5.0 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-metrics v0.5.1 // indirect
|
||||
github.com/hashicorp/go-plugin v1.5.2 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
|
||||
github.com/huandu/skiplist v1.2.0 // indirect
|
||||
github.com/iancoleman/strcase v0.3.0 // indirect
|
||||
github.com/improbable-eng/grpc-web v0.15.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmhodges/levigo v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/linxGnu/grocksdb v1.8.12 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/highwayhash v1.0.2 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mtibben/percent v0.2.1 // indirect
|
||||
github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect
|
||||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.47.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rogpeppe/go-internal v1.12.0 // indirect
|
||||
github.com/rs/cors v1.8.3 // indirect
|
||||
github.com/rs/zerolog v1.32.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sasha-s/go-deadlock v0.3.1 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/viper v1.18.2 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/tendermint/go-amino v0.16.0 // indirect
|
||||
github.com/tidwall/btree v1.7.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/zondax/hid v0.9.2 // indirect
|
||||
github.com/zondax/ledger-go v0.14.3 // indirect
|
||||
go.etcd.io/bbolt v1.3.8 // indirect
|
||||
go.uber.org/multierr v1.10.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/net v0.24.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gotest.tools/v3 v3.5.1 // indirect
|
||||
nhooyr.io/websocket v1.8.6 // indirect
|
||||
pgregory.net/rapid v1.1.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
1030
systemtests/go.sum
Normal file
1030
systemtests/go.sum
Normal file
File diff suppressed because it is too large
Load Diff
7
systemtests/main_test.go
Normal file
7
systemtests/main_test.go
Normal file
@ -0,0 +1,7 @@
|
||||
package systemtests
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestMain is the entry point for all tests in this package.
// It delegates to RunTests, which provisions the chain before the run.
func TestMain(m *testing.M) {
	RunTests(m)
}
|
||||
33
systemtests/rpc_client.go
Normal file
33
systemtests/rpc_client.go
Normal file
@ -0,0 +1,33 @@
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
client "github.com/cometbft/cometbft/rpc/client/http"
|
||||
cmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// RPCClient is a test helper to interact with a node via the RPC endpoint.
type RPCClient struct {
	client *client.HTTP // started websocket client; stopped on test cleanup
	t      *testing.T
}
|
||||
|
||||
// NewRPCClient constructor
|
||||
func NewRPCClient(t *testing.T, addr string) RPCClient {
|
||||
t.Helper()
|
||||
httpClient, err := client.New(addr, "/websocket")
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, httpClient.Start())
|
||||
t.Cleanup(func() { _ = httpClient.Stop() })
|
||||
return RPCClient{client: httpClient, t: t}
|
||||
}
|
||||
|
||||
// Validators returns the validator set at the latest height.
// Default pagination is used; any RPC error fails the test.
func (r RPCClient) Validators() []*cmtypes.Validator {
	v, err := r.client.Validators(context.Background(), nil, nil, nil)
	require.NoError(r.t, err)
	return v.Validators
}
|
||||
54
systemtests/staking_test.go
Normal file
54
systemtests/staking_test.go
Normal file
@ -0,0 +1,54 @@
|
||||
//go:build system_test
|
||||
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
// TestStakeUnstake covers the basic staking round trip against a fresh chain:
// delegate to the first validator, verify balance and delegation state, then
// undelegate half and verify the unbonding entry.
func TestStakeUnstake(t *testing.T) {
	// Scenario:
	// delegate tokens to validator
	// undelegate some tokens

	sut.ResetChain(t)

	cli := NewCLIWrapper(t, sut, verbose)

	// add genesis account with some tokens
	account1Addr := cli.AddKey("account1")
	sut.ModifyGenesisCLI(t,
		[]string{"genesis", "add-genesis-account", account1Addr, "10000000stake"},
	)

	sut.StartChain(t)

	// query validator address to delegate tokens
	rsp := cli.CustomQuery("q", "staking", "validators")
	valAddr := gjson.Get(rsp, "validators.#.operator_address").Array()[0].String()

	// stake tokens
	rsp = cli.Run("tx", "staking", "delegate", valAddr, "10000stake", "--from="+account1Addr, "--fees=1stake")
	RequireTxSuccess(t, rsp)

	t.Log(cli.QueryBalance(account1Addr, "stake"))
	// 10_000_000 initial - 10_000 delegated - 1 fee
	assert.Equal(t, int64(9989999), cli.QueryBalance(account1Addr, "stake"))

	rsp = cli.CustomQuery("q", "staking", "delegation", account1Addr, valAddr)
	assert.Equal(t, "10000", gjson.Get(rsp, "delegation_response.balance.amount").String(), rsp)
	assert.Equal(t, "stake", gjson.Get(rsp, "delegation_response.balance.denom").String(), rsp)

	// unstake tokens
	rsp = cli.Run("tx", "staking", "unbond", valAddr, "5000stake", "--from="+account1Addr, "--fees=1stake")
	RequireTxSuccess(t, rsp)

	rsp = cli.CustomQuery("q", "staking", "delegation", account1Addr, valAddr)
	assert.Equal(t, "5000", gjson.Get(rsp, "delegation_response.balance.amount").String(), rsp)
	assert.Equal(t, "stake", gjson.Get(rsp, "delegation_response.balance.denom").String(), rsp)

	rsp = cli.CustomQuery("q", "staking", "unbonding-delegation", account1Addr, valAddr)
	assert.Equal(t, "5000", gjson.Get(rsp, "unbond.entries.#.balance").Array()[0].String(), rsp)
}
|
||||
963
systemtests/system.go
Normal file
963
systemtests/system.go
Normal file
@ -0,0 +1,963 @@
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"container/ring"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cometbft/cometbft/libs/sync"
|
||||
client "github.com/cometbft/cometbft/rpc/client/http"
|
||||
ctypes "github.com/cometbft/cometbft/rpc/core/types"
|
||||
tmtypes "github.com/cometbft/cometbft/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tidwall/sjson"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/server"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
var (
	// WorkDir is the directory where tests are executed. All relative paths
	// used by this package should be interpreted against it.
	WorkDir string

	// ExecBinaryUnversionedRegExp extracts the unversioned binary name,
	// e.g. "simd-v2" -> "simd".
	ExecBinaryUnversionedRegExp = regexp.MustCompile(`^(\w+)-?.*$`)
)
|
||||
|
||||
// TestnetInitializer sets up the initial testnet files: node homes,
// genesis, and keys.
type TestnetInitializer interface {
	Initialize()
}
|
||||
|
||||
// SystemUnderTest blockchain provisioning
type SystemUnderTest struct {
	execBinary         string // resolved path to the chain binary
	blockListener      *EventListener
	currentHeight      int64 // updated atomically by the NewBlock subscription
	outputDir          string
	testnetInitializer TestnetInitializer

	// blockTime is the expected/desired block time. This is not going to be very precise
	// since Tendermint consensus does not allow specifying it directly.
	blockTime         time.Duration
	rpcAddr           string
	initialNodesCount int
	nodesCount        int // may grow beyond initialNodesCount via AddFullnode
	minGasPrice       string
	cleanupFn         []CleanupFn
	outBuff           *ring.Ring // last N stdout lines for error reporting
	errBuff           *ring.Ring // last N stderr lines for error reporting
	out               io.Writer
	verbose           bool
	ChainStarted      bool
	projectName       string
	dirty             bool // requires full reset when marked dirty

	pidsLock sync.RWMutex
	pids     map[int]struct{} // running node process ids
	chainID  string
}
|
||||
|
||||
func NewSystemUnderTest(execBinary string, verbose bool, nodesCount int, blockTime time.Duration, initer ...TestnetInitializer) *SystemUnderTest {
|
||||
if execBinary == "" {
|
||||
panic("executable binary name must not be empty")
|
||||
}
|
||||
nameTokens := ExecBinaryUnversionedRegExp.FindAllString(execBinary, 1)
|
||||
if len(nameTokens) == 0 || nameTokens[0] == "" {
|
||||
panic("failed to parse project name from binary")
|
||||
}
|
||||
|
||||
execBinary = filepath.Join(WorkDir, "binaries", execBinary)
|
||||
s := &SystemUnderTest{
|
||||
chainID: "testing",
|
||||
execBinary: execBinary,
|
||||
outputDir: "./testnet",
|
||||
blockTime: blockTime,
|
||||
rpcAddr: "tcp://localhost:26657",
|
||||
initialNodesCount: nodesCount,
|
||||
outBuff: ring.New(100),
|
||||
errBuff: ring.New(100),
|
||||
out: os.Stdout,
|
||||
verbose: verbose,
|
||||
minGasPrice: fmt.Sprintf("0.000001%s", sdk.DefaultBondDenom),
|
||||
projectName: nameTokens[0],
|
||||
pids: make(map[int]struct{}, nodesCount),
|
||||
}
|
||||
s.testnetInitializer = NewSingleHostTestnetCmdInitializer(execBinary, WorkDir, s.chainID, s.outputDir, s.initialNodesCount, s.minGasPrice, s.CommitTimeout(), s.Log)
|
||||
return s
|
||||
}
|
||||
|
||||
// SetExecBinary overwrites the path of the chain binary to execute.
func (s *SystemUnderTest) SetExecBinary(binary string) {
	s.execBinary = binary
}
|
||||
|
||||
// SetTestnetInitializer overwrites the default testnet initializer.
func (s *SystemUnderTest) SetTestnetInitializer(testnetInitializer TestnetInitializer) {
	s.testnetInitializer = testnetInitializer
}
|
||||
|
||||
// CommitTimeout returns the max time to wait for a commit. Default to 90% of block time
|
||||
func (s *SystemUnderTest) CommitTimeout() time.Duration {
|
||||
// The commit timeout is a lower bound for the block time. We try to set it to a level that allows us to reach the expected block time.
|
||||
return time.Duration((int64(s.blockTime) * 90) / 100) // leave 10% for all other operations
|
||||
}
|
||||
|
||||
// SetupChain wipes any previous testnet state, runs the configured testnet
// initializer, applies system-test defaults to the genesis of all nodes, and
// backs up genesis and keyring for later restore on ResetChain.
func (s *SystemUnderTest) SetupChain() {
	s.Logf("Setup chain: %s\n", s.outputDir)
	if err := os.RemoveAll(filepath.Join(WorkDir, s.outputDir)); err != nil {
		panic(err.Error())
	}
	s.testnetInitializer.Initialize()
	s.nodesCount = s.initialNodesCount

	// modify genesis with system test defaults
	src := filepath.Join(WorkDir, s.nodePath(0), "config", "genesis.json")
	genesisBz, err := os.ReadFile(src) // #nosec G304
	if err != nil {
		panic(fmt.Sprintf("failed to load genesis: %s", err))
	}

	// cap block gas so tests run with deterministic limits
	genesisBz, err = sjson.SetRawBytes(genesisBz, "consensus.params.block.max_gas", []byte(fmt.Sprintf(`"%d"`, 10_000_000)))
	if err != nil {
		panic(fmt.Sprintf("failed set block max gas: %s", err))
	}
	s.withEachNodeHome(func(i int, home string) {
		if err := saveGenesis(home, genesisBz); err != nil {
			panic(err)
		}
	})

	// backup genesis (restored by ResetChain)
	dest := filepath.Join(WorkDir, s.nodePath(0), "config", "genesis.json.orig")
	if _, err := copyFile(src, dest); err != nil {
		panic(fmt.Sprintf("copy failed :%#+v", err))
	}
	// backup keyring (restored by ResetChain)
	src = filepath.Join(WorkDir, s.nodePath(0), "keyring-test")
	dest = filepath.Join(WorkDir, s.outputDir, "keyring-test")
	if err := copyFilesInDir(src, dest); err != nil {
		panic(fmt.Sprintf("copy files from dir :%#+v", err))
	}
}
|
||||
|
||||
// StartChain starts all nodes asynchronously with the given extra CLI args,
// waits for the RPC endpoint to serve a first block, and installs a NewBlock
// subscription that keeps currentHeight up to date.
func (s *SystemUnderTest) StartChain(t *testing.T, xargs ...string) {
	t.Helper()
	s.Log("Start chain\n")
	s.ChainStarted = true
	s.startNodesAsync(t, append([]string{"start", "--trace", "--log_level=info"}, xargs...)...)

	s.AwaitNodeUp(t, s.rpcAddr)

	t.Log("Start new block listener")
	s.blockListener = NewEventListener(t, s.rpcAddr)
	s.cleanupFn = append(s.cleanupFn,
		s.blockListener.Subscribe("tm.event='NewBlock'", func(e ctypes.ResultEvent) (more bool) {
			newBlock, ok := e.Data.(tmtypes.EventDataNewBlock)
			require.True(t, ok, "unexpected type %T", e.Data)
			// height is read concurrently elsewhere; store atomically
			atomic.StoreInt64(&s.currentHeight, newBlock.Block.Height)
			return true
		}),
	)
	// wait for the first block after subscribing; 4e9 ns = 4s timeout
	s.AwaitNextBlock(t, 4e9)
}
|
||||
|
||||
// MarkDirty flags the chain state as modified; the whole chain will be reset
// before reuse (see ResetDirtyChain).
func (s *SystemUnderTest) MarkDirty() {
	s.dirty = true
}
|
||||
|
||||
// IsDirty is true when a non-default genesis or other state modification was
// applied that might create incompatibility for subsequent tests.
func (s *SystemUnderTest) IsDirty() bool {
	return s.dirty
}
|
||||
|
||||
// watchLogs stores stdout/stderr in a file and in a ring buffer to output the
// last n lines on test error. Must be called before cmd is started: it wires
// up the pipes. The spawned reader goroutines stop when stopRingBuffer is
// closed via the registered cleanup callback.
func (s *SystemUnderTest) watchLogs(node int, cmd *exec.Cmd) {
	logfile, err := os.Create(filepath.Join(WorkDir, s.outputDir, fmt.Sprintf("node%d.out", node)))
	if err != nil {
		panic(fmt.Sprintf("open logfile error %#+v", err))
	}

	errReader, err := cmd.StderrPipe()
	if err != nil {
		panic(fmt.Sprintf("stderr reader error %#+v", err))
	}
	stopRingBuffer := make(chan struct{})
	// tee stderr into both the logfile and the error ring buffer
	go appendToBuf(io.TeeReader(errReader, logfile), s.errBuff, stopRingBuffer)

	outReader, err := cmd.StdoutPipe()
	if err != nil {
		panic(fmt.Sprintf("stdout reader error %#+v", err))
	}
	go appendToBuf(io.TeeReader(outReader, logfile), s.outBuff, stopRingBuffer)
	s.cleanupFn = append(s.cleanupFn, func() {
		close(stopRingBuffer)
		_ = logfile.Close()
	})
}
|
||||
|
||||
func appendToBuf(r io.Reader, b *ring.Ring, stop <-chan struct{}) {
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
default:
|
||||
}
|
||||
text := scanner.Text()
|
||||
// filter out noise
|
||||
if isLogNoise(text) {
|
||||
continue
|
||||
}
|
||||
b.Value = text
|
||||
b = b.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// isLogNoise reports whether the log line matches a known noise pattern that
// should be kept out of the ring buffers.
func isLogNoise(text string) bool {
	noisePatterns := []string{
		"\x1b[36mmodule=\x1b[0mrpc-server", // "module=rpc-server"
	}
	for _, pattern := range noisePatterns {
		if strings.Contains(text, pattern) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// AwaitUpgradeInfo blocks util an upgrade info file is persisted to disk
|
||||
func (s *SystemUnderTest) AwaitUpgradeInfo(t *testing.T) {
|
||||
t.Helper()
|
||||
var found bool
|
||||
for !found {
|
||||
s.withEachNodeHome(func(i int, home string) {
|
||||
_, err := os.Stat(filepath.Join(s.nodePath(0), "data", "upgrade-info.json"))
|
||||
switch {
|
||||
case err == nil:
|
||||
found = true
|
||||
case !os.IsNotExist(err):
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
})
|
||||
time.Sleep(s.blockTime / 2)
|
||||
}
|
||||
}
|
||||
|
||||
// AwaitChainStopped blocks until no node process is running anymore,
// polling once per block time.
func (s *SystemUnderTest) AwaitChainStopped() {
	for s.anyNodeRunning() {
		time.Sleep(s.blockTime)
	}
}
|
||||
|
||||
// AwaitNodeUp ensures the node is running
|
||||
func (s *SystemUnderTest) AwaitNodeUp(t *testing.T, rpcAddr string) {
|
||||
t.Helper()
|
||||
t.Logf("Await node is up: %s", rpcAddr)
|
||||
timeout := DefaultWaitTime
|
||||
ctx, done := context.WithTimeout(context.Background(), timeout)
|
||||
defer done()
|
||||
|
||||
started := make(chan struct{})
|
||||
go func() { // query for a non empty block on status page
|
||||
t.Logf("Checking node status: %s\n", rpcAddr)
|
||||
for {
|
||||
con, err := client.New(rpcAddr, "/websocket")
|
||||
if err != nil || con.Start() != nil {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
result, err := con.Status(ctx)
|
||||
if err != nil || result.SyncInfo.LatestBlockHeight < 1 {
|
||||
_ = con.Stop()
|
||||
continue
|
||||
}
|
||||
t.Logf("Node started. Current block: %d\n", result.SyncInfo.LatestBlockHeight)
|
||||
_ = con.Stop()
|
||||
started <- struct{}{}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-started:
|
||||
case <-ctx.Done():
|
||||
require.NoError(t, ctx.Err())
|
||||
case <-time.NewTimer(timeout).C:
|
||||
t.Fatalf("timeout waiting for node start: %s", timeout)
|
||||
}
|
||||
}
|
||||
|
||||
// StopChain stops the system under test and executes all registered cleanup
// callbacks. Nodes first receive SIGTERM; any process still alive after a
// grace period is killed.
func (s *SystemUnderTest) StopChain() {
	s.Log("Stop chain\n")
	if !s.ChainStarted {
		return
	}

	for _, c := range s.cleanupFn {
		c()
	}
	s.cleanupFn = nil
	// send SIGTERM for a graceful shutdown
	s.withEachPid(func(p *os.Process) {
		go func() {
			if err := p.Signal(syscall.SIGTERM); err != nil {
				s.Logf("failed to stop node with pid %d: %s\n", p.Pid, err)
			}
		}()
	})
	// give some final time to shut down
	s.withEachPid(func(p *os.Process) {
		time.Sleep(200 * time.Millisecond)
	})
	// goodbye: force-kill whatever is still running
	for ; s.anyNodeRunning(); time.Sleep(100 * time.Millisecond) {
		s.withEachPid(func(p *os.Process) {
			s.Logf("killing node %d\n", p.Pid)
			if err := p.Kill(); err != nil {
				s.Logf("failed to kill node with pid %d: %s\n", p.Pid, err)
			}
		})
	}
	s.ChainStarted = false
}
|
||||
|
||||
func (s *SystemUnderTest) withEachPid(cb func(p *os.Process)) {
|
||||
s.pidsLock.RLock()
|
||||
pids := s.pids
|
||||
s.pidsLock.RUnlock()
|
||||
|
||||
for pid := range pids {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
cb(p)
|
||||
}
|
||||
}
|
||||
|
||||
// PrintBuffer prints the chain logs to the console
|
||||
func (s *SystemUnderTest) PrintBuffer() {
|
||||
s.outBuff.Do(func(v interface{}) {
|
||||
if v != nil {
|
||||
_, _ = fmt.Fprintf(s.out, "out> %s\n", v)
|
||||
}
|
||||
})
|
||||
fmt.Fprint(s.out, "8< chain err -----------------------------------------\n")
|
||||
s.errBuff.Do(func(v interface{}) {
|
||||
if v != nil {
|
||||
_, _ = fmt.Fprintf(s.out, "err> %s\n", v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// AwaitBlockHeight blocks until te target height is reached. An optional timout parameter can be passed to abort early
|
||||
func (s *SystemUnderTest) AwaitBlockHeight(t *testing.T, targetHeight int64, timeout ...time.Duration) {
|
||||
t.Helper()
|
||||
require.Greater(t, targetHeight, s.currentHeight)
|
||||
var maxWaitTime time.Duration
|
||||
if len(timeout) != 0 {
|
||||
maxWaitTime = timeout[0]
|
||||
} else {
|
||||
maxWaitTime = time.Duration(targetHeight-s.currentHeight+3) * s.blockTime
|
||||
}
|
||||
abort := time.NewTimer(maxWaitTime).C
|
||||
for {
|
||||
select {
|
||||
case <-abort:
|
||||
t.Fatalf("Timeout - block %d not reached within %s", targetHeight, maxWaitTime)
|
||||
return
|
||||
default:
|
||||
if current := s.AwaitNextBlock(t); current >= targetHeight {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AwaitNextBlock is a first class function that any caller can use to ensure
// a new block was minted. Returns the new height. Fails the test when no new
// block arrives within the (optional) timeout, default 3x block time.
// NOTE(review): on timeout the polling goroutine stays blocked on the
// unbuffered send to done — a small leak per timed-out call; confirm intended.
func (s *SystemUnderTest) AwaitNextBlock(t *testing.T, timeout ...time.Duration) int64 {
	t.Helper()
	maxWaitTime := s.blockTime * 3
	if len(timeout) != 0 { // optional argument to overwrite default timeout
		maxWaitTime = timeout[0]
	}
	done := make(chan int64)
	go func() {
		// poll atomically until the height moves past the starting value
		for start, current := atomic.LoadInt64(&s.currentHeight), atomic.LoadInt64(&s.currentHeight); current == start; current = atomic.LoadInt64(&s.currentHeight) {
			time.Sleep(s.blockTime)
		}
		done <- atomic.LoadInt64(&s.currentHeight)
		close(done)
	}()
	select {
	case v := <-done:
		return v
	case <-time.NewTimer(maxWaitTime).C:
		t.Fatalf("Timeout - no block within %s", maxWaitTime)
		return -1
	}
}
|
||||
|
||||
// ResetDirtyChain resets the chain only when a non-default setup or state
// modification (dirty flag) was applied.
func (s *SystemUnderTest) ResetDirtyChain(t *testing.T) {
	t.Helper()
	if s.IsDirty() {
		s.ResetChain(t)
	}
}
|
||||
|
||||
// ResetChain stops all nodes, restores the original genesis and keyring,
// removes nodes added after setup, and clears all state via 'unsafe-reset-all'.
func (s *SystemUnderTest) ResetChain(t *testing.T) {
	t.Helper()
	t.Log("Reset chain")
	s.StopChain()
	restoreOriginalGenesis(t, s)
	restoreOriginalKeyring(t, s)
	s.resetBuffers()

	// remove all additional nodes (added via AddFullnode)
	for i := s.initialNodesCount; i < s.nodesCount; i++ {
		_ = os.RemoveAll(filepath.Join(WorkDir, s.nodePath(i)))
		_ = os.Remove(filepath.Join(WorkDir, s.outputDir, fmt.Sprintf("node%d.out", i)))
	}
	s.nodesCount = s.initialNodesCount

	// reset all validator nodes
	s.ForEachNodeExecAndWait(t, []string{"comet", "unsafe-reset-all"})
	// safe to write plainly: all nodes are stopped at this point
	s.currentHeight = 0
	s.dirty = false
}
|
||||
|
||||
// ModifyGenesisCLI executes the given CLI commands against every node home to
// modify the genesis, and marks the chain dirty.
func (s *SystemUnderTest) ModifyGenesisCLI(t *testing.T, cmds ...[]string) {
	t.Helper()
	s.ForEachNodeExecAndWait(t, cmds...)
	s.MarkDirty()
}
|
||||
|
||||
// GenesisMutator receives the raw genesis JSON and returns the mutated version.
type GenesisMutator func([]byte) []byte
|
||||
|
||||
// ModifyGenesisJSON resets the chain and executes the callbacks to update the json representation
// The mutator callbacks after each other receive the genesis as raw bytes and return the updated genesis for the next.
// example:
//
//	return func(genesis []byte) []byte {
//		val, _ := json.Marshal(sdk.NewDecCoins(fees...))
//		state, _ := sjson.SetRawBytes(genesis, "app_state.globalfee.params.minimum_gas_prices", val)
//		return state
//	}
func (s *SystemUnderTest) ModifyGenesisJSON(t *testing.T, mutators ...GenesisMutator) {
	t.Helper()
	s.ResetChain(t)
	s.modifyGenesisJSON(t, mutators...)
}
|
||||
|
||||
// modifyGenesisJSON applies the mutators to node 0's genesis and distributes
// the result to all nodes, without enforcing a reset. Requires the chain to be
// at height 0; marks the chain dirty.
func (s *SystemUnderTest) modifyGenesisJSON(t *testing.T, mutators ...GenesisMutator) {
	t.Helper()
	require.Empty(t, s.currentHeight, "forced chain reset required")
	current, err := os.ReadFile(filepath.Join(WorkDir, s.nodePath(0), "config", "genesis.json"))
	require.NoError(t, err)
	for _, m := range mutators {
		current = m(current)
	}
	// write to a temp file first, then fan out to all node homes
	out := storeTempFile(t, current)
	defer os.Remove(out.Name())
	s.setGenesis(t, out.Name())
	s.MarkDirty()
}
|
||||
|
||||
// ReadGenesisJSON returns current genesis.json content as raw string
|
||||
func (s *SystemUnderTest) ReadGenesisJSON(t *testing.T) string {
|
||||
t.Helper()
|
||||
content, err := os.ReadFile(filepath.Join(WorkDir, s.nodePath(0), "config", "genesis.json"))
|
||||
require.NoError(t, err)
|
||||
return string(content)
|
||||
}
|
||||
|
||||
// setGenesis copy genesis file to all nodes
|
||||
func (s *SystemUnderTest) setGenesis(t *testing.T, srcPath string) {
|
||||
t.Helper()
|
||||
in, err := os.Open(srcPath)
|
||||
require.NoError(t, err)
|
||||
defer in.Close()
|
||||
var buf bytes.Buffer
|
||||
|
||||
_, err = io.Copy(&buf, in)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.withEachNodeHome(func(i int, home string) {
|
||||
require.NoError(t, saveGenesis(home, buf.Bytes()))
|
||||
})
|
||||
}
|
||||
|
||||
func saveGenesis(home string, content []byte) error {
|
||||
out, err := os.Create(filepath.Join(WorkDir, home, "config", "genesis.json"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("out file: %w", err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, bytes.NewReader(content)); err != nil {
|
||||
return fmt.Errorf("write out file: %w", err)
|
||||
}
|
||||
|
||||
if err = out.Close(); err != nil {
|
||||
return fmt.Errorf("close out file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEachNodeExecAndWait runs the given app executable commands for all cluster nodes synchronously
// The commands output is returned for each node: result[node][command].
func (s *SystemUnderTest) ForEachNodeExecAndWait(t *testing.T, cmds ...[]string) [][]string {
	t.Helper()
	result := make([][]string, s.nodesCount)
	s.withEachNodeHome(func(i int, home string) {
		result[i] = make([]string, len(cmds))
		for j, xargs := range cmds {
			// every command is executed against this node's home dir
			xargs = append(xargs, "--home", home)
			s.Logf("Execute `%s %s`\n", s.execBinary, strings.Join(xargs, " "))
			out := runShellCmd(t, s.execBinary, xargs...)
			s.Logf("Result: %s\n", out)
			result[i][j] = out
		}
	})
	return result
}
|
||||
|
||||
// runShellCmd executes the command and returns its combined output, failing
// the test on any error.
func runShellCmd(t *testing.T, cmd string, args ...string) string {
	t.Helper()
	out, err := runShellCmdX(cmd, args...)
	require.NoError(t, err)
	return out
}
|
||||
|
||||
func runShellCmdX(cmd string, args ...string) (string, error) {
|
||||
c := exec.Command( //nolint:gosec // used by tests only
|
||||
locateExecutable(cmd),
|
||||
args...,
|
||||
)
|
||||
c.Dir = WorkDir
|
||||
out, err := c.CombinedOutput()
|
||||
if err != nil {
|
||||
return string(out), fmt.Errorf("run `%s %s`: out: %s: %w", cmd, strings.Join(args, " "), string(out), err)
|
||||
}
|
||||
return string(out), nil
|
||||
}
|
||||
|
||||
// startNodesAsync runs the given app cli command for all cluster nodes and
// returns without waiting. Each started pid is tracked in s.pids and removed
// again by a watcher goroutine when the process exits.
func (s *SystemUnderTest) startNodesAsync(t *testing.T, xargs ...string) {
	t.Helper()
	s.withEachNodeHome(func(i int, home string) {
		args := append(xargs, "--home", home)
		s.Logf("Execute `%s %s`\n", s.execBinary, strings.Join(args, " "))
		cmd := exec.Command( //nolint:gosec // used by tests only
			locateExecutable(s.execBinary),
			args...,
		)
		cmd.Dir = WorkDir
		// must wire up log capture before Start
		s.watchLogs(i, cmd)
		require.NoError(t, cmd.Start(), "node %d", i)

		pid := cmd.Process.Pid
		s.pidsLock.Lock()
		s.pids[pid] = struct{}{}
		s.pidsLock.Unlock()
		s.Logf("Node started: %d\n", pid)

		// cleanup when stopped
		go func(pid int) {
			_ = cmd.Wait() // blocks until shutdown
			s.pidsLock.Lock()
			delete(s.pids, pid)
			s.pidsLock.Unlock()
			s.Logf("Node stopped: %d\n", pid)
		}(pid)
	})
}
|
||||
|
||||
// withEachNodeHome runs the callback for every node with its index and
// relative home path.
func (s *SystemUnderTest) withEachNodeHome(cb func(i int, home string)) {
	for i := 0; i < s.nodesCount; i++ {
		cb(i, s.nodePath(i))
	}
}
|
||||
|
||||
// nodePath returns the path of the node within the work dir. Not absolute.
func (s *SystemUnderTest) nodePath(i int) string {
	return NodePath(i, s.outputDir, s.projectName)
}
|
||||
|
||||
// NodePath builds the relative home path of node n, e.g. "<outputDir>/node4/<name>".
func NodePath(n int, outputDir, name string) string {
	node := fmt.Sprintf("node%d", n)
	return fmt.Sprintf("%s/%s/%s", outputDir, node, name)
}
|
||||
|
||||
// Log writes the message to the configured output, but only in verbose mode.
func (s *SystemUnderTest) Log(msg string) {
	if s.verbose {
		_, _ = fmt.Fprint(s.out, msg)
	}
}
|
||||
|
||||
// Logf formats and writes the message to the configured output in verbose mode.
func (s *SystemUnderTest) Logf(msg string, args ...interface{}) {
	s.Log(fmt.Sprintf(msg, args...))
}
|
||||
|
||||
// RPCClient returns a new RPC client connected to this system's RPC address.
func (s *SystemUnderTest) RPCClient(t *testing.T) RPCClient {
	t.Helper()
	return NewRPCClient(t, s.rpcAddr)
}
|
||||
|
||||
func (s *SystemUnderTest) AllPeers(t *testing.T) []string {
|
||||
t.Helper()
|
||||
result := make([]string, s.nodesCount)
|
||||
for i, n := range s.AllNodes(t) {
|
||||
result[i] = n.PeerAddr()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AllNodes returns metadata for all nodes in the cluster.
func (s *SystemUnderTest) AllNodes(t *testing.T) []Node {
	t.Helper()
	return AllNodes(t, s)
}
|
||||
|
||||
// AllNodes returns metadata for all nodes of the given system. Node IDs are
// read via `comet show-node-id`; RPC/P2P ports follow the testnet command's
// base-port + node-index allocation scheme.
func AllNodes(t *testing.T, s *SystemUnderTest) []Node {
	t.Helper()
	result := make([]Node, s.nodesCount)
	outs := s.ForEachNodeExecAndWait(t, []string{"comet", "show-node-id"})
	ip, err := server.ExternalIP()
	require.NoError(t, err)

	for i, out := range outs {
		result[i] = Node{
			ID:      strings.TrimSpace(out[0]),
			IP:      ip,
			RPCPort: 26657 + i, // as defined in testnet command
			P2PPort: 16656 + i, // as defined in testnet command
		}
	}
	return result
}
|
||||
|
||||
// resetBuffers discards captured stdout/stderr lines by allocating fresh ring buffers.
func (s *SystemUnderTest) resetBuffers() {
	s.outBuff = ring.New(100)
	s.errBuff = ring.New(100)
}
|
||||
|
||||
// AddFullnode starts a new fullnode that connects to the existing chain but is
// not a validator. Optional beforeStart callbacks run after the node home is
// prepared and before the process is started. Marks the chain dirty.
func (s *SystemUnderTest) AddFullnode(t *testing.T, beforeStart ...func(nodeNumber int, nodePath string)) Node {
	t.Helper()
	s.MarkDirty()
	s.nodesCount++
	nodeNumber := s.nodesCount - 1
	nodePath := s.nodePath(nodeNumber)
	_ = os.RemoveAll(nodePath) // drop any legacy path, just in case

	// prepare new node: init home and copy current genesis
	moniker := fmt.Sprintf("node%d", nodeNumber)
	args := []string{"init", moniker, "--home", nodePath, "--overwrite"}
	s.Logf("Execute `%s %s`\n", s.execBinary, strings.Join(args, " "))
	cmd := exec.Command( //nolint:gosec // used by tests only
		locateExecutable(s.execBinary),
		args...,
	)
	cmd.Dir = WorkDir
	s.watchLogs(nodeNumber, cmd)
	require.NoError(t, cmd.Run(), "failed to start node with id %d", nodeNumber)
	require.NoError(t, saveGenesis(nodePath, []byte(s.ReadGenesisJSON(t))))

	// quick hack: copy config and overwrite by start params
	configFile := filepath.Join(WorkDir, nodePath, "config", "config.toml")
	_ = os.Remove(configFile)
	_, err := copyFile(filepath.Join(WorkDir, s.nodePath(0), "config", "config.toml"), configFile)
	require.NoError(t, err)

	// start node: peer with all existing nodes
	allNodes := s.AllNodes(t)
	node := allNodes[len(allNodes)-1]
	peers := make([]string, len(allNodes)-1)
	for i, n := range allNodes[0 : len(allNodes)-1] {
		peers[i] = n.PeerAddr()
	}
	for _, c := range beforeStart {
		c(nodeNumber, nodePath)
	}
	args = []string{
		"start",
		"--p2p.persistent_peers=" + strings.Join(peers, ","),
		fmt.Sprintf("--p2p.laddr=tcp://localhost:%d", node.P2PPort),
		fmt.Sprintf("--rpc.laddr=tcp://localhost:%d", node.RPCPort),
		fmt.Sprintf("--grpc.address=localhost:%d", 9090+nodeNumber),
		fmt.Sprintf("--grpc-web.address=localhost:%d", 8090+nodeNumber),
		"--moniker=" + moniker,
		"--log_level=info",
		"--home", nodePath,
	}
	s.Logf("Execute `%s %s`\n", s.execBinary, strings.Join(args, " "))
	cmd = exec.Command( //nolint:gosec // used by tests only
		locateExecutable(s.execBinary),
		args...,
	)
	cmd.Dir = WorkDir
	s.watchLogs(nodeNumber, cmd)
	require.NoError(t, cmd.Start(), "node %d", nodeNumber)
	return node
}
|
||||
|
||||
// NewEventListener creates an EventListener connected to this system's RPC address.
func (s *SystemUnderTest) NewEventListener(t *testing.T) *EventListener {
	t.Helper()
	return NewEventListener(t, s.rpcAddr)
}
|
||||
|
||||
// anyNodeRunning reports whether any node process is still left running.
func (s *SystemUnderTest) anyNodeRunning() bool {
	s.pidsLock.RLock()
	defer s.pidsLock.RUnlock()
	return len(s.pids) != 0
}
|
||||
|
||||
// Node describes a single chain node: its comet node ID, host IP and the
// ports it serves RPC and P2P on.
type Node struct {
	ID      string
	IP      string
	RPCPort int
	P2PPort int
}
|
||||
|
||||
// PeerAddr returns the p2p address in "<nodeID>@<ip>:<p2pPort>" format.
func (n Node) PeerAddr() string {
	return fmt.Sprintf("%s@%s:%d", n.ID, n.IP, n.P2PPort)
}
|
||||
|
||||
// RPCAddr returns the RPC endpoint in "tcp://<ip>:<rpcPort>" format.
func (n Node) RPCAddr() string {
	return fmt.Sprintf("tcp://%s:%d", n.IP, n.RPCPort)
}
|
||||
|
||||
// locateExecutable looks up the binary on the OS path. It panics on any
// failure: test setup must abort hard when a required binary is missing.
func locateExecutable(file string) string {
	if strings.TrimSpace(file) == "" {
		panic("executable binary name must not be empty")
	}
	path, err := exec.LookPath(file)
	switch {
	case err != nil:
		panic(fmt.Sprintf("unexpected error with file %q: %s", file, err.Error()))
	case path == "":
		panic(fmt.Sprintf("%q not found", file))
	}
	return path
}
|
||||
|
||||
// EventListener watches for events on the chain via a websocket subscription.
type EventListener struct {
	t      *testing.T
	client *client.HTTP
}
|
||||
|
||||
// NewEventListener creates and starts a websocket client for rpcAddr.
// NOTE(review): unlike NewRPCClient, no Stop/cleanup is registered here —
// the client lives until process exit; confirm intended.
func NewEventListener(t *testing.T, rpcAddr string) *EventListener {
	t.Helper()
	httpClient, err := client.New(rpcAddr, "/websocket")
	require.NoError(t, err)
	require.NoError(t, httpClient.Start())
	return &EventListener{client: httpClient, t: t}
}
|
||||
|
||||
// DefaultWaitTime is the default upper bound for awaiting node startup, events, etc.
var DefaultWaitTime = 30 * time.Second
|
||||
|
||||
type (
	// CleanupFn is a callback invoked on chain stop to release resources.
	CleanupFn func()
	// EventConsumer handles a received event; returning false ends the subscription.
	EventConsumer func(e ctypes.ResultEvent) (more bool)
)
|
||||
|
||||
// Subscribe to receive events for a topic. Does not block. The returned
// function unsubscribes and cancels the subscription context.
// For query syntax See https://docs.cosmos.network/master/core/events.html#subscribing-to-events
func (l *EventListener) Subscribe(query string, cb EventConsumer) func() {
	ctx, done := context.WithCancel(context.Background())
	l.t.Cleanup(done)
	eventsChan, err := l.client.WSEvents.Subscribe(ctx, "testing", query)
	require.NoError(l.t, err)
	cleanup := func() {
		// best effort unsubscribe with a bounded wait, then cancel the context
		ctx, _ := context.WithTimeout(ctx, DefaultWaitTime) //nolint:govet // used in cleanup only
		go l.client.WSEvents.Unsubscribe(ctx, "testing", query) //nolint:errcheck // used by tests only
		done()
	}
	go func() {
		// pump events to the consumer until it signals to stop or the channel closes
		for e := range eventsChan {
			if !cb(e) {
				return
			}
		}
	}()
	return cleanup
}
|
||||
|
||||
// AwaitQuery subscribes for a single result with a timeout. This can be used with `broadcast-mode=async`.
// For query syntax See https://docs.cosmos.network/master/core/events.html#subscribing-to-events
func (l *EventListener) AwaitQuery(query string, optMaxWaitTime ...time.Duration) *ctypes.ResultEvent {
	c, result := CaptureSingleEventConsumer()
	maxWaitTime := DefaultWaitTime
	if len(optMaxWaitTime) != 0 {
		maxWaitTime = optMaxWaitTime[0]
	}
	cleanupFn := l.Subscribe(query, TimeoutConsumer(l.t, maxWaitTime, c))
	l.t.Cleanup(cleanupFn)
	return result
}
|
||||
|
||||
// TimeoutConsumer decorates an event consumer with a max wait time between
// events: the test fails when the wait time is exceeded without a result.
// The timer is reset after every consumed event.
func TimeoutConsumer(t *testing.T, maxWaitTime time.Duration, next EventConsumer) EventConsumer {
	t.Helper()
	ctx, done := context.WithCancel(context.Background())
	t.Cleanup(done)
	timeout := time.NewTimer(maxWaitTime)
	timedOut := make(chan struct{}, 1)
	go func() {
		select {
		case <-ctx.Done():
		case <-timeout.C:
			timedOut <- struct{}{}
			close(timedOut)
		}
	}()
	return func(e ctypes.ResultEvent) (more bool) {
		select {
		case <-timedOut:
			t.Fatalf("Timeout waiting for new events %s", maxWaitTime)
			return false
		default:
			// got an event in time: restart the clock for the next one
			timeout.Reset(maxWaitTime)
			result := next(e)
			if !result {
				done() // consumer is finished; stop the watchdog goroutine
			}
			return result
		}
	}
}
|
||||
|
||||
// CaptureSingleEventConsumer consumes one event. No timeout
|
||||
func CaptureSingleEventConsumer() (EventConsumer, *ctypes.ResultEvent) {
|
||||
var result ctypes.ResultEvent
|
||||
return func(e ctypes.ResultEvent) (more bool) {
|
||||
return false
|
||||
}, &result
|
||||
}
|
||||
|
||||
// CaptureAllEventsConsumer is an `EventConsumer` that captures all events until `done()` is called to stop or timeout happens.
|
||||
// The consumer works async in the background and returns all the captured events when `done()` is called.
|
||||
// This can be used to verify that certain events have happened.
|
||||
// Example usage:
|
||||
//
|
||||
// c, done := CaptureAllEventsConsumer(t)
|
||||
// query := `tm.event='Tx'`
|
||||
// cleanupFn := l.Subscribe(query, c)
|
||||
// t.Cleanup(cleanupFn)
|
||||
//
|
||||
// // do something in your test that create events
|
||||
//
|
||||
// assert.Len(t, done(), 1) // then verify your assumption
|
||||
func CaptureAllEventsConsumer(t *testing.T, optMaxWaitTime ...time.Duration) (c EventConsumer, done func() []ctypes.ResultEvent) {
|
||||
t.Helper()
|
||||
maxWaitTime := DefaultWaitTime
|
||||
if len(optMaxWaitTime) != 0 {
|
||||
maxWaitTime = optMaxWaitTime[0]
|
||||
}
|
||||
var (
|
||||
mu sync.Mutex
|
||||
capturedEvents []ctypes.ResultEvent
|
||||
exit bool
|
||||
)
|
||||
collectEventsConsumer := func(e ctypes.ResultEvent) (more bool) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if exit {
|
||||
return false
|
||||
}
|
||||
capturedEvents = append(capturedEvents, e)
|
||||
return true
|
||||
}
|
||||
|
||||
return TimeoutConsumer(t, maxWaitTime, collectEventsConsumer), func() []ctypes.ResultEvent {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
exit = true
|
||||
return capturedEvents
|
||||
}
|
||||
}
|
||||
|
||||
// restoreOriginalGenesis replace nodes genesis by the one created on setup
|
||||
func restoreOriginalGenesis(t *testing.T, s *SystemUnderTest) {
|
||||
t.Helper()
|
||||
src := filepath.Join(WorkDir, s.nodePath(0), "config", "genesis.json.orig")
|
||||
s.setGenesis(t, src)
|
||||
}
|
||||
|
||||
// restoreOriginalKeyring replaces test keyring with original
|
||||
func restoreOriginalKeyring(t *testing.T, s *SystemUnderTest) {
|
||||
t.Helper()
|
||||
dest := filepath.Join(WorkDir, s.outputDir, "keyring-test")
|
||||
require.NoError(t, os.RemoveAll(dest))
|
||||
for i := 0; i < s.initialNodesCount; i++ {
|
||||
src := filepath.Join(WorkDir, s.nodePath(i), "keyring-test")
|
||||
require.NoError(t, copyFilesInDir(src, dest))
|
||||
}
|
||||
}
|
||||
|
||||
// copyFile copies the source file to the dest file path. The returned *os.File
// is already closed; callers may use it for its Name() only.
func copyFile(src, dest string) (*os.File, error) {
	in, err := os.Open(src)
	if err != nil {
		return nil, err
	}
	defer in.Close()
	out, err := os.Create(dest)
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(out, in); err != nil {
		out.Close() // best effort; the copy error is the relevant one
		return nil, err
	}
	// Report the close error: a deferred Close would silently drop a failed
	// flush and let a truncated copy look successful.
	if err := out.Close(); err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// copyFilesInDir copy files in src dir to dest path
|
||||
func copyFilesInDir(src, dest string) error {
|
||||
err := os.MkdirAll(dest, 0o750)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mkdirs: %s", err)
|
||||
}
|
||||
fs, err := os.ReadDir(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("read dir: %s", err)
|
||||
}
|
||||
for _, f := range fs {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
if _, err := copyFile(filepath.Join(src, f.Name()), filepath.Join(dest, f.Name())); err != nil {
|
||||
return fmt.Errorf("copy file: %q: %s", f.Name(), err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func storeTempFile(t *testing.T, content []byte) *os.File {
|
||||
t.Helper()
|
||||
out, err := os.CreateTemp(t.TempDir(), "genesis")
|
||||
require.NoError(t, err)
|
||||
_, err = io.Copy(out, bytes.NewReader(content))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, out.Close())
|
||||
return out
|
||||
}
|
||||
132
systemtests/test_runner.go
Normal file
132
systemtests/test_runner.go
Normal file
@ -0,0 +1,132 @@
|
||||
//go:build system_test
|
||||
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
)
|
||||
|
||||
// Package-level state shared by all system tests; initialized once in RunTests.
var (
	// sut is the chain fixture under test.
	sut *SystemUnderTest
	// verbose enables extra log output (set via the -verbose flag).
	verbose bool
	// execBinaryName is the chain binary name (set via the -binary flag).
	execBinaryName string
)
|
||||
|
||||
// RunTests is the entry point for system tests. It parses the command-line
// flags, performs a fail-fast environment check, boots a fresh testnet via
// SystemUnderTest, runs all tests, and tears the chain down. Call it from a
// package TestMain. Note: it terminates the process via os.Exit.
func RunTests(m *testing.M) {
	waitTime := flag.Duration("wait-time", DefaultWaitTime, "time to wait for chain events")
	nodesCount := flag.Int("nodes-count", 4, "number of nodes in the cluster")
	blockTime := flag.Duration("block-time", 1000*time.Millisecond, "block creation time")
	execBinary := flag.String("binary", "simd", "executable binary for server/ client side")
	bech32Prefix := flag.String("bech32", "cosmos", "bech32 prefix to be used with addresses")
	flag.BoolVar(&verbose, "verbose", false, "verbose output")
	flag.Parse()

	// fail fast on most common setup issue
	requireEnoughFileHandlers(*nodesCount + 1) // +1 as tests may start another node

	dir, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	WorkDir = dir
	if verbose {
		println("Work dir: ", WorkDir)
	}
	initSDKConfig(*bech32Prefix)

	// Override the package default so all helpers honor the flag.
	DefaultWaitTime = *waitTime
	if *execBinary == "" {
		panic("executable binary name must not be empty")
	}
	execBinaryName = *execBinary

	sut = NewSystemUnderTest(*execBinary, verbose, *nodesCount, *blockTime)
	sut.SetupChain() // setup chain and keyring

	// run tests
	exitCode := m.Run()

	// postprocess: stop nodes and dump their output on failure or when verbose
	sut.StopChain()
	if verbose || exitCode != 0 {
		sut.PrintBuffer()
		printResultFlag(exitCode == 0)
	}

	os.Exit(exitCode)
}
|
||||
|
||||
// GetSystemUnderTest returns the global chain fixture initialized by RunTests.
func GetSystemUnderTest() *SystemUnderTest {
	return sut
}
|
||||
|
||||
// IsVerbose reports whether verbose output was requested via the -verbose flag.
func IsVerbose() bool {
	return verbose
}
|
||||
|
||||
// GetExecutableName returns the chain binary name set via the -binary flag.
func GetExecutableName() string {
	return execBinaryName
}
|
||||
|
||||
// requireEnoughFileHandlers uses `ulimit`
|
||||
func requireEnoughFileHandlers(nodesCount int) {
|
||||
ulimit, err := exec.LookPath("ulimit")
|
||||
if err != nil || ulimit == "" { // skip when not available
|
||||
return
|
||||
}
|
||||
|
||||
cmd := exec.Command(ulimit, "-n")
|
||||
cmd.Dir = WorkDir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error :%#+v, output: %s", err, string(out)))
|
||||
}
|
||||
fileDescrCount, err := strconv.Atoi(strings.Trim(string(out), " \t\n"))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error :%#+v, output: %s", err, string(out)))
|
||||
}
|
||||
expFH := nodesCount * 260 // random number that worked on my box
|
||||
if fileDescrCount < expFH {
|
||||
panic(fmt.Sprintf("Fail fast. Insufficient setup. Run 'ulimit -n %d'", expFH))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func initSDKConfig(bech32Prefix string) {
|
||||
config := sdk.GetConfig()
|
||||
config.SetBech32PrefixForAccount(bech32Prefix, bech32Prefix+sdk.PrefixPublic)
|
||||
config.SetBech32PrefixForValidator(bech32Prefix+sdk.PrefixValidator+sdk.PrefixOperator, bech32Prefix+sdk.PrefixValidator+sdk.PrefixOperator+sdk.PrefixPublic)
|
||||
config.SetBech32PrefixForConsensusNode(bech32Prefix+sdk.PrefixValidator+sdk.PrefixConsensus, bech32Prefix+sdk.PrefixValidator+sdk.PrefixConsensus+sdk.PrefixPublic)
|
||||
}
|
||||
|
||||
// ASCII-art banners printed at the end of a run by printResultFlag.
const (
	successFlag = `
 ___ _   _  ___ ___ ___  ___ ___
/ __| | | |/ __/ __/ _ \/ __/ __|
\__ \ |_| | (_| (_|  __/\__ \__ \
|___/\__,_|\___\___\___||___/___/`
	failureFlag = `
  __      _ _          _
 / _|    (_) |        | |
| |_ __ _ _| | ___  __| |
|  _/ _| | | |/ _ \/ _| |
| || (_| | | |  __/ (_| |
|_| \__,_|_|_|\___|\__,_|`
)
|
||||
|
||||
func printResultFlag(ok bool) {
|
||||
if ok {
|
||||
fmt.Println(successFlag)
|
||||
} else {
|
||||
fmt.Println(failureFlag)
|
||||
}
|
||||
}
|
||||
203
systemtests/testnet_init.go
Normal file
203
systemtests/testnet_init.go
Normal file
@ -0,0 +1,203 @@
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cometbft/cometbft/p2p"
|
||||
"github.com/creachadair/tomledit"
|
||||
"github.com/creachadair/tomledit/parser"
|
||||
)
|
||||
|
||||
// SingleHostTestnetCmdInitializer default testnet cmd that supports the --single-host param
type SingleHostTestnetCmdInitializer struct {
	execBinary        string        // chain binary to execute
	workDir           string        // base working directory
	chainID           string        // chain id passed to `testnet init-files`
	outputDir         string        // destination for the generated node dirs
	initialNodesCount int           // number of validators to generate
	minGasPrice       string        // minimum-gas-prices setting for all nodes
	commitTimeout     time.Duration // consensus timeout_commit
	log               func(string)  // sink for progress output
}
|
||||
|
||||
// NewSingleHostTestnetCmdInitializer constructor
|
||||
func NewSingleHostTestnetCmdInitializer(
|
||||
execBinary, workDir, chainID, outputDir string,
|
||||
initialNodesCount int,
|
||||
minGasPrice string,
|
||||
commitTimeout time.Duration,
|
||||
log func(string),
|
||||
) *SingleHostTestnetCmdInitializer {
|
||||
return &SingleHostTestnetCmdInitializer{
|
||||
execBinary: execBinary,
|
||||
workDir: workDir,
|
||||
chainID: chainID,
|
||||
outputDir: outputDir,
|
||||
initialNodesCount: initialNodesCount,
|
||||
minGasPrice: minGasPrice,
|
||||
commitTimeout: commitTimeout,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
func (s SingleHostTestnetCmdInitializer) Initialize() {
|
||||
args := []string{
|
||||
"testnet",
|
||||
"init-files",
|
||||
"--chain-id=" + s.chainID,
|
||||
"--output-dir=" + s.outputDir,
|
||||
"--validator-count=" + strconv.Itoa(s.initialNodesCount),
|
||||
"--keyring-backend=test",
|
||||
"--minimum-gas-prices=" + s.minGasPrice,
|
||||
"--commit-timeout=" + s.commitTimeout.String(),
|
||||
"--single-host",
|
||||
}
|
||||
s.log(fmt.Sprintf("+++ %s %s\n", s.execBinary, strings.Join(args, " ")))
|
||||
out, err := runShellCmdX(s.execBinary, args...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
s.log(out)
|
||||
}
|
||||
|
||||
// ModifyConfigYamlInitializer testnet cmd prior to --single-host param. Modifies the toml files.
type ModifyConfigYamlInitializer struct {
	execBinary        string        // chain binary to execute
	workDir           string        // base working directory
	chainID           string        // chain id passed to `testnet init-files`
	outputDir         string        // destination for the generated node dirs
	initialNodesCount int           // number of validators to generate
	minGasPrice       string        // minimum-gas-prices setting for all nodes
	commitTimeout     time.Duration // consensus timeout_commit
	log               func(string)  // sink for progress output
	projectName       string        // node dir prefix (see NodePath)
}
|
||||
|
||||
func NewModifyConfigYamlInitializer(exec string, s *SystemUnderTest) *ModifyConfigYamlInitializer {
|
||||
return &ModifyConfigYamlInitializer{
|
||||
execBinary: exec,
|
||||
workDir: WorkDir,
|
||||
chainID: s.chainID,
|
||||
outputDir: s.outputDir,
|
||||
initialNodesCount: s.initialNodesCount,
|
||||
minGasPrice: s.minGasPrice,
|
||||
commitTimeout: s.CommitTimeout(),
|
||||
log: s.Log,
|
||||
projectName: s.projectName,
|
||||
}
|
||||
}
|
||||
|
||||
// Base port numbers; node i listens on start+i for each service.
const (
	rpcPortStart  = 26657
	apiPortStart  = 1317
	grpcPortStart = 9090
	p2pPortStart  = 16656
)
|
||||
|
||||
func (s ModifyConfigYamlInitializer) Initialize() {
|
||||
// init with legacy testnet command
|
||||
args := []string{
|
||||
"testnet",
|
||||
"init-files",
|
||||
"--chain-id=" + s.chainID,
|
||||
"--output-dir=" + s.outputDir,
|
||||
"--v=" + strconv.Itoa(s.initialNodesCount),
|
||||
"--keyring-backend=test",
|
||||
"--minimum-gas-prices=" + s.minGasPrice,
|
||||
}
|
||||
s.log(fmt.Sprintf("+++ %s %s\n", s.execBinary, strings.Join(args, " ")))
|
||||
|
||||
out, err := runShellCmdX(s.execBinary, args...)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
s.log(out)
|
||||
|
||||
nodeAddresses := make([]string, s.initialNodesCount)
|
||||
for i := 0; i < s.initialNodesCount; i++ {
|
||||
nodeDir := filepath.Join(WorkDir, NodePath(i, s.outputDir, s.projectName), "config")
|
||||
id := string(mustV(p2p.LoadNodeKey(filepath.Join(nodeDir, "node_key.json"))).ID())
|
||||
nodeAddresses[i] = fmt.Sprintf("%s@127.0.0.1:%d", id, p2pPortStart+i)
|
||||
}
|
||||
|
||||
// then update configs
|
||||
for i := 0; i < s.initialNodesCount; i++ {
|
||||
nodeDir := filepath.Join(WorkDir, NodePath(i, s.outputDir, s.projectName), "config")
|
||||
nodeNumber := i
|
||||
EditToml(filepath.Join(nodeDir, "config.toml"), func(doc *tomledit.Document) {
|
||||
UpdatePort(doc, rpcPortStart+i, "rpc", "laddr")
|
||||
UpdatePort(doc, p2pPortStart+i, "p2p", "laddr")
|
||||
SetBool(doc, false, "p2p", "addr_book_strict")
|
||||
SetBool(doc, false, "p2p", "pex")
|
||||
SetBool(doc, true, "p2p", "allow_duplicate_ip")
|
||||
peers := make([]string, s.initialNodesCount)
|
||||
copy(peers, nodeAddresses[0:nodeNumber])
|
||||
copy(peers[nodeNumber:], nodeAddresses[nodeNumber+1:])
|
||||
SetValue(doc, strings.Join(peers, ","), "p2p", "persistent_peers")
|
||||
SetValue(doc, s.commitTimeout.String(), "consensus", "timeout_commit")
|
||||
})
|
||||
EditToml(filepath.Join(nodeDir, "app.toml"), func(doc *tomledit.Document) {
|
||||
UpdatePort(doc, apiPortStart+i, "api", "address")
|
||||
UpdatePort(doc, grpcPortStart+i, "grpc", "address")
|
||||
SetBool(doc, true, "grpc-web", "enable")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func EditToml(filename string, f func(doc *tomledit.Document)) {
|
||||
tomlFile := mustV(os.OpenFile(filename, os.O_RDWR, 0o600))
|
||||
defer tomlFile.Close()
|
||||
doc := mustV(tomledit.Parse(tomlFile))
|
||||
f(doc)
|
||||
mustV(tomlFile.Seek(0, 0)) // reset the cursor to the beginning of the file
|
||||
must(tomlFile.Truncate(0))
|
||||
must(tomledit.Format(tomlFile, doc))
|
||||
}
|
||||
|
||||
func SetBool(doc *tomledit.Document, newVal bool, xpath ...string) {
|
||||
e := doc.First(xpath...)
|
||||
if e == nil {
|
||||
panic(fmt.Sprintf("not found: %v", xpath))
|
||||
}
|
||||
e.Value = parser.MustValue(strconv.FormatBool(newVal))
|
||||
}
|
||||
|
||||
func SetValue(doc *tomledit.Document, newVal string, xpath ...string) {
|
||||
e := doc.First(xpath...)
|
||||
if e == nil {
|
||||
panic(fmt.Sprintf("not found: %v", xpath))
|
||||
}
|
||||
e.Value = parser.MustValue(fmt.Sprintf("%q", newVal))
|
||||
}
|
||||
|
||||
func UpdatePort(doc *tomledit.Document, newPort int, xpath ...string) {
|
||||
e := doc.First(xpath...)
|
||||
if e == nil {
|
||||
panic(fmt.Sprintf("not found: %v", xpath))
|
||||
}
|
||||
data := e.Value.X.String()
|
||||
pos := strings.LastIndexAny(data, ":")
|
||||
if pos == -1 {
|
||||
panic("column not found")
|
||||
}
|
||||
data = data[0:pos+1] + strconv.Itoa(newPort)
|
||||
e.Value = parser.MustValue(data + "\"")
|
||||
}
|
||||
|
||||
// mustV is the generic companion to must for fluent calls: it returns r when
// err is nil and panics otherwise.
func mustV[T any](r T, err error) T {
	must(err)
	return r
}
|
||||
|
||||
// must panics when err is non-nil; helper for fluent calls in test setup.
func must(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
||||
113
systemtests/upgrade_test.go
Normal file
113
systemtests/upgrade_test.go
Normal file
@ -0,0 +1,113 @@
|
||||
//go:build system_test && linux
|
||||
|
||||
package systemtests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tidwall/gjson"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/types/address"
|
||||
)
|
||||
|
||||
// TestChainUpgrade exercises a full on-chain software upgrade: it boots a
// v0.50 chain, submits and passes a MsgSoftwareUpgrade proposal, halts at the
// upgrade height, restarts with the current branch binary, and smoke tests
// the upgraded chain.
func TestChainUpgrade(t *testing.T) {
	// Scenario:
	// start a legacy chain with some state
	// when a chain upgrade proposal is executed
	// then the chain upgrades successfully
	sut.StopChain()

	legacyBinary := FetchExecutable(t, "v0.50")
	t.Logf("+++ legacy binary: %s\n", legacyBinary)
	// remember the current-branch binary/initializer to restore after the halt
	currentBranchBinary := sut.execBinary
	currentInitializer := sut.testnetInitializer
	sut.SetExecBinary(legacyBinary)
	// legacy binary predates --single-host, so use the toml-rewriting initializer
	sut.SetTestnetInitializer(NewModifyConfigYamlInitializer(legacyBinary, sut))
	sut.SetupChain()
	votingPeriod := 5 * time.Second // enough time to vote
	sut.ModifyGenesisJSON(t, SetGovVotingPeriod(t, votingPeriod))

	const (
		upgradeHeight int64 = 22
		upgradeName         = "v050-to-v051"
	)

	// --halt-height makes the legacy chain stop itself at the upgrade height
	sut.StartChain(t, fmt.Sprintf("--halt-height=%d", upgradeHeight))

	cli := NewCLIWrapper(t, sut, verbose)
	govAddr := sdk.AccAddress(address.Module("gov")).String()
	// submit upgrade proposal
	proposal := fmt.Sprintf(`
{
 "messages": [
  {
   "@type": "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade",
   "authority": %q,
   "plan": {
    "name": %q,
    "height": "%d"
   }
  }
 ],
 "metadata": "ipfs://CID",
 "deposit": "100000000stake",
 "title": "my upgrade",
 "summary": "testing"
}`, govAddr, upgradeName, upgradeHeight)
	proposalID := cli.SubmitAndVoteGovProposal(proposal)
	t.Logf("current_height: %d\n", sut.currentHeight)
	raw := cli.CustomQuery("q", "gov", "proposal", proposalID)
	t.Log(raw)

	sut.AwaitBlockHeight(t, upgradeHeight-1, 60*time.Second)
	t.Logf("current_height: %d\n", sut.currentHeight)
	raw = cli.CustomQuery("q", "gov", "proposal", proposalID)
	proposalStatus := gjson.Get(raw, "proposal.status").String()
	require.Equal(t, "3", proposalStatus, raw) // PROPOSAL_STATUS_PASSED

	t.Log("waiting for upgrade info")
	sut.AwaitUpgradeInfo(t)
	sut.StopChain()

	t.Log("Upgrade height was reached. Upgrading chain")
	sut.SetExecBinary(currentBranchBinary)
	sut.SetTestnetInitializer(currentInitializer)
	sut.StartChain(t)
	cli = NewCLIWrapper(t, sut, verbose)

	// smoke test that new version runs
	ownerAddr := cli.GetKeyAddr(defaultSrcAddr)
	got := cli.Run("tx", "accounts", "init", "continuous-locking-account", `{"end_time":"2034-01-22T11:38:15.116127Z", "owner":"`+ownerAddr+`"}`, "--from="+defaultSrcAddr)
	RequireTxSuccess(t, got)
	got = cli.Run("tx", "protocolpool", "fund-community-pool", "100stake", "--from="+defaultSrcAddr)
	RequireTxSuccess(t, got)
}
|
||||
|
||||
// cacheDir is the folder (relative to WorkDir) where downloaded chain binaries are cached.
const cacheDir = "binaries"
|
||||
|
||||
// FetchExecutable to download and extract tar.gz for linux
|
||||
func FetchExecutable(t *testing.T, version string) string {
|
||||
// use local cache
|
||||
cacheFolder := filepath.Join(WorkDir, cacheDir)
|
||||
err := os.MkdirAll(cacheFolder, 0o777)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cacheFile := filepath.Join(cacheFolder, fmt.Sprintf("%s_%s", execBinaryName, version))
|
||||
if _, err := os.Stat(cacheFile); err == nil {
|
||||
return cacheFile
|
||||
}
|
||||
destFile := cacheFile
|
||||
t.Log("+++ version not in cache, downloading from docker image")
|
||||
runShellCmd(t, "docker", "pull", "ghcr.io/cosmos/simapp:"+version)
|
||||
runShellCmd(t, "docker", "create", "--name=ci_temp", "ghcr.io/cosmos/simapp:"+version)
|
||||
runShellCmd(t, "docker", "cp", "ci_temp:/usr/bin/simd", destFile)
|
||||
return destFile
|
||||
}
|
||||
Loading…
Reference in New Issue
Block a user