Compare commits

...

7 Commits

SHA1 Message Date
1646dd6d05 dockerignore 2023-10-19 08:56:11 -05:00
981bd6a5b2 tests: use :local image tag 2023-10-19 08:56:11 -05:00
5fe9add0af dockerfile caching 2023-10-19 08:56:11 -05:00
006655f0f8 let user build sdk-tester image 2023-10-19 08:56:11 -05:00
d82c676777 don't error when store key missing 2023-10-19 08:56:11 -05:00
61fe74ca3b build cleanup 2023-09-21 19:30:02 +08:00
    docker ignore
    dockerfile cleanup
    clean up init script
    forward LOGLEVEL
afc40702e2 test improvements 2023-09-21 19:29:27 +08:00
    handle request errors
    rm cruft
    test logging
    cleanup test script
24 changed files with 196 additions and 395 deletions


@@ -1,5 +1,7 @@
-# localnet-setup
-localnet-setup
-# build
-build
+Dockerfile
+**/node_modules
+init.sh
+build
+localnet-setup

.github/CODEOWNERS

@@ -1,5 +0,0 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/
# Primary (global) repo maintainers
* @evmos/core-engineering


@@ -94,39 +94,34 @@ jobs:
           ref: jest_timeout
       - name: Environment
         run: ls -tlh && env
-      - name: build containers scripts
-        working-directory: tests/sdk_tests
-        run: ./build-laconicd-container.sh && ./build-sdk-test-container.sh
-      - name: start containers
-        working-directory: tests/sdk_tests
-        run: docker compose up -d
-      - name: run-tests.sh
-        working-directory: tests/sdk_tests
-        run: ./run-tests.sh
-      - name: reset containers for auction tests
-        working-directory: tests/sdk_tests
-        if: always()
-        run: docker compose down
-      - name: start auction containers
-        working-directory: tests/sdk_tests
-        run: docker compose -f docker-compose-auctions.yml up -d
-      - name: run-acution-tests.sh
-        working-directory: tests/sdk_tests
-        run: ./run-auction-tests.sh
-      - name: reset containers for nameservice tests
-        working-directory: tests/sdk_tests
-        if: always()
-        run: docker compose -f docker-compose-auctions.yml down
-      - name: start auction containers
-        working-directory: tests/sdk_tests
-        run: docker compose -f docker-compose-nameservice.yml up -d
-      - name: run-nameservice-expiry-tests.sh
-        working-directory: tests/sdk_tests
-        run: ./run-nameservice-expiry-tests.sh
-      - name: reset containers for nameservice tests
-        working-directory: tests/sdk_tests
-        if: always()
-        run: docker compose -f docker-compose-nameservice.yml down
+      - name: Build containers scripts
+        working-directory: tests/sdk_tests
+        run: ./build-laconicd-container.sh && ./build-sdk-test-container.sh
+      - name: Start containers
+        working-directory: tests/sdk_tests
+        run: docker compose up -d
+      - name: Run tests
+        working-directory: tests/sdk_tests
+        run: ./run-tests.sh
+      - name: Start auction containers
+        working-directory: tests/sdk_tests
+        env:
+          TEST_AUCTION_ENABLED: true
+        run: docker compose up -d
+      - name: Run auction tests
+        working-directory: tests/sdk_tests
+        run: ./run-tests.sh test:auctions
+      - name: Start auction containers
+        working-directory: tests/sdk_tests
+        env:
+          TEST_REGISTRY_EXPIRY: true
+        run: docker compose up -d
+      - name: Run nameservice expiry tests
+        working-directory: tests/sdk_tests
+        run: ./run-tests.sh test:nameservice-expiry
       # integration_tests:
       #   runs-on: ubuntu-latest
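
Note: the reworked workflow reuses one compose stack and one run-tests.sh entrypoint for every suite. A rough local equivalent of the CI steps above (a sketch, assuming the tests/sdk_tests layout referenced in the workflow) would be:

    cd tests/sdk_tests
    ./build-laconicd-container.sh && ./build-sdk-test-container.sh
    docker compose up -d
    ./run-tests.sh
    TEST_AUCTION_ENABLED=true docker compose up -d
    ./run-tests.sh test:auctions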


@ -1,14 +1,14 @@
@@ -1,14 +1,14 @@
 FROM golang:alpine AS build-env
 
-# Set up dependencies
-ENV PACKAGES git build-base
+# Install dependencies
+RUN apk add --update git build-base linux-headers
 
 # Set working directory for the build
 WORKDIR /go/src/github.com/cerc-io/laconicd
 
-# Install dependencies
-RUN apk add --update $PACKAGES
-RUN apk add linux-headers
+# Cache Go modules
+COPY go.mod go.sum ./
+RUN go mod download
 
 # Add source files
 COPY . .
@@ -21,10 +21,10 @@ FROM alpine:3.17.0
 # Install ca-certificates
 RUN apk add --update ca-certificates jq curl
 
-WORKDIR /
-
 # Copy over binaries from the build-env
 COPY --from=build-env /go/src/github.com/cerc-io/laconicd/build/laconicd /usr/bin/laconicd
 
+WORKDIR /
+
 # Run laconicd by default
-CMD ["laconicd"]
+ENTRYPOINT ["laconicd"]
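
The reordered build stage copies go.mod and go.sum and runs go mod download before copying the full source tree, so module downloads sit in their own image layer. A source-only edit then rebuilds without re-fetching modules; roughly (tag borrowed from the build script later in this compare):

    docker build -t cerc/laconicd:local .
    # edit a .go file, then rebuild: the go mod download layer comes from cache
    docker build -t cerc/laconicd:local .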


@@ -72,7 +72,7 @@ ifeq ($(ENABLE_ROCKSDB),true)
   BUILD_TAGS += rocksdb_build
   test_tags += rocksdb_build
 else
-  $(warning RocksDB support is disabled; to build and test with RocksDB support, set ENABLE_ROCKSDB=true)
+  $(info RocksDB support is disabled; to build and test with RocksDB support, set ENABLE_ROCKSDB=true)
 endif
 
 # DB backend selection
@@ -145,7 +145,7 @@ docker-build:
 	# update old container
 	docker rm laconicd || true
 	# create a new container from the latest image
-	docker create --name laconic -t -i cerc-io/laconicd:latest laconicd
+	docker create --name laconic -t -i ${DOCKER_IMAGE}:${DOCKER_TAG} laconicd
 	# move the binaries to the ./build directory
 	mkdir -p ./build/
 	docker cp laconic:/usr/bin/laconicd ./build/
@@ -316,7 +316,7 @@ TEST_TARGETS := test-unit test-unit-cover test-race
 # Test runs-specific rules. To add a new test target, just add
 # a new rule, customise ARGS or TEST_PACKAGES ad libitum, and
 # append the new rule to the TEST_TARGETS list.
-test-unit: ARGS=-timeout=10m -race
+test-unit: ARGS=-timeout=10m -race -test.v
 test-unit: TEST_PACKAGES=$(PACKAGES_UNIT)
 test-race: ARGS=-race
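
With the image reference parameterized, the docker-build target can be pointed at a different image name or tag by overriding the variables on the make command line (a sketch; the DOCKER_IMAGE and DOCKER_TAG defaults are defined elsewhere in the Makefile and are not shown in this compare):

    make docker-build DOCKER_IMAGE=cerc/laconicd DOCKER_TAG=local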

init.sh

@@ -5,19 +5,20 @@ CHAINID="laconic_9000-1"
 MONIKER="localtestnet"
 KEYRING="test"
 KEYALGO="eth_secp256k1"
-LOGLEVEL="info"
+LOGLEVEL="${LOGLEVEL:-info}"
 # trace evm
 TRACE="--trace"
 # TRACE=""
 
 # validate dependencies are installed
-command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
+command -v jq > /dev/null 2>&1 || {
+  echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"
+  exit 1
+}
 
 # remove existing daemon and client
 rm -rf ~/.laconic*
 
-make install
-
 laconicd config keyring-backend $KEYRING
 laconicd config chain-id $CHAINID
@@ -27,71 +28,70 @@ laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
 # Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
 laconicd init $MONIKER --chain-id $CHAINID
 
+update_genesis() {
+  jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json &&
+    mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+}
+
 # Change parameter token denominations to aphoton
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+update_genesis '.app_state["staking"]["params"]["bond_denom"]="aphoton"'
+update_genesis '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"'
+update_genesis '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"'
+update_genesis '.app_state["mint"]["params"]["mint_denom"]="aphoton"'
 # Custom modules
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+update_genesis '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"'
+update_genesis '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"'
+update_genesis '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"'
+update_genesis '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"'
+update_genesis '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"'
 
 if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
   echo "Setting timers for expiry tests."
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+  update_genesis '.app_state["registry"]["params"]["record_rent_duration"]="60s"'
+  update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="60s"'
+  update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
 fi
 
 if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
   echo "Enabling auction and setting timers."
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+  update_genesis '.app_state["registry"]["params"]["authority_auction_enabled"]=true'
+  update_genesis '.app_state["registry"]["params"]["authority_rent_duration"]="60s"'
+  update_genesis '.app_state["registry"]["params"]["authority_grace_period"]="300s"'
+  update_genesis '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"'
+  update_genesis '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"'
 fi
 
 # increase block time (?)
-cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+update_genesis '.consensus_params["block"]["time_iota_ms"]="1000"'
 
 # Set gas limit in genesis
-cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
+update_genesis '.consensus_params["block"]["max_gas"]="10000000"'
 
 # disable produce empty block
 if [[ "$OSTYPE" == "darwin"* ]]; then
   sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
 else
   sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
 fi
 
-if [[ $1 == "pending" ]]; then
+if [[ "$1" == "pending" ]]; then
+  alias sed-i="sed -i"
   if [[ "$OSTYPE" == "darwin"* ]]; then
-    sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-  else
-    sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
+    alias sed-i="sed -i ''"
   fi
+  sed-i \
+    -e 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' \
+    -e 's/timeout_propose = "3s"/timeout_propose = "30s"/g' \
+    -e 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' \
+    -e 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' \
+    -e 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' \
+    -e 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' \
+    -e 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' \
+    -e 's/timeout_commit = "5s"/timeout_commit = "150s"/g' \
+    -e 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' \
+    $HOME/.laconicd/config/config.toml
 fi
@@ -106,9 +106,16 @@ laconicd collect-gentxs
 # Run this to ensure everything worked and that the genesis file is setup correctly
 laconicd validate-genesis
 
-if [[ $1 == "pending" ]]; then
+if [[ "$1" == "pending" ]]; then
   echo "pending mode is on, please wait for the first block committed."
 fi
 
 # Start the node (remove the --pruning=nothing flag if historical queries are not needed)
-laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
+laconicd start \
+  --pruning=nothing \
+  --evm.tracer=json $TRACE \
+  --log_level $LOGLEVEL \
+  --minimum-gas-prices=0.0001aphoton \
+  --json-rpc.api eth,txpool,personal,net,debug,web3,miner \
+  --api.enable \
+  --gql-server --gql-playground
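
Since init.sh now reads LOGLEVEL and the test toggles from the environment and applies the genesis tweaks through the update_genesis helper, a fixturenet with, for example, expiry timers enabled can be set up directly with something like:

    TEST_REGISTRY_EXPIRY=true LOGLEVEL=debug ./init.sh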


@@ -56,7 +56,7 @@ message Params {
   ];
 }
 
-// Params defines the registry module records
+// Record defines a registry record
 message Record {
   string id = 1 [(gogoproto.moretags) = "json:\"id\" yaml:\"id\""];
   string bond_id = 2 [(gogoproto.moretags) = "json:\"bondId\" yaml:\"bondId\""];
@@ -69,7 +69,7 @@ message Record {
   string type = 9 [(gogoproto.moretags) = "json:\"types\" yaml:\"types\""];
 }
 
-// AuthorityEntry defines the registry module AuthorityEntries
+// AuthorityEntry defines a registry authority
 message AuthorityEntry {
   string name = 1;
   NameAuthority entry = 2;
@@ -99,7 +99,7 @@ message NameEntry {
   NameRecord entry = 2;
 }
 
-// NameRecord
+// NameRecord defines a versioned name record
 message NameRecord {
   NameRecordEntry latest = 1;
   repeated NameRecordEntry history = 2;
@@ -131,4 +131,4 @@ message BlockChangeSet {
 message AuctionBidInfo {
   string auction_id = 1 [(gogoproto.moretags) = "json:\"auctionID\" yaml:\"auctionID\""];
   string bidder_address = 2 [(gogoproto.moretags) = "json:\"bidderAddress\" yaml:\"bidderAddress\""];
 }


@@ -1,52 +0,0 @@
# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
ARG VARIANT=16-bullseye
FROM node:${VARIANT}
ARG USERNAME=node
ARG NPM_GLOBAL=/usr/local/share/npm-global
# Add NPM global to PATH.
ENV PATH=${NPM_GLOBAL}/bin:${PATH}
RUN \
# Configure global npm install location, use group to adapt to UID/GID changes
if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \
&& usermod -a -G npm ${USERNAME} \
&& umask 0002 \
&& mkdir -p ${NPM_GLOBAL} \
&& touch /usr/local/etc/npmrc \
&& chown ${USERNAME}:npm ${NPM_GLOBAL} /usr/local/etc/npmrc \
&& chmod g+s ${NPM_GLOBAL} \
&& npm config -g set prefix ${NPM_GLOBAL} \
&& su ${USERNAME} -c "npm config -g set prefix ${NPM_GLOBAL}" \
# Install eslint
&& su ${USERNAME} -c "umask 0002 && npm install -g eslint" \
&& npm cache clean --force > /dev/null 2>&1
# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>
# [Optional] Uncomment if you want to install an additional version of node using nvm
# ARG EXTRA_NODE_VERSION=10
# RUN su node -c "source /usr/local/share/nvm/nvm.sh && nvm install ${EXTRA_NODE_VERSION}"
# [Optional] Uncomment if you want to install more global node modules
# RUN su node -c "npm install -g <your-package-list-here>"
WORKDIR /
COPY entrypoint.sh .
ENTRYPOINT ["/entrypoint.sh"]
# Placeholder CMD : generally this will be overridden at run time like :
# docker run -it -v /home/builder/cerc/laconic-sdk:/workspace cerc/builder-js sh -c 'cd /workspace && yarn && yarn build'
CMD node --version
# Temp hack, clone the laconic-sdk repo here
WORKDIR /app
RUN \
git clone https://github.com/cerc-io/laconic-sdk.git \
&& cd laconic-sdk \
&& yarn install
WORKDIR /app/laconic-sdk


@@ -1,3 +1,2 @@
 #!/usr/bin/env bash
-docker build -t cerc-io/laconicd:local-test ../../
+docker build -t cerc/laconicd:local ../.. --progress=plain


@@ -1,3 +0,0 @@
#!/usr/bin/env bash
docker build -t cerc-io/laconic-sdk-tester:local-test -f Dockerfile-sdk .


@@ -1,31 +0,0 @@
services:
laconicd:
restart: unless-stopped
image: cerc-io/laconicd:local-test
environment:
- TEST_AUCTION_ENABLED=true
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
- ../../init.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
healthcheck:
test: ["CMD", "curl", "-v", "http://127.0.0.1:6060"]
interval: 1s
timeout: 5s
retries: 30
ports:
- "6060"
- "26657"
- "26656"
- "9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"
sdk-test-runner:
image: cerc-io/laconic-sdk-tester:local-test
depends_on:
laconicd:
condition: service_healthy
command: tail -F /dev/null


@@ -1,31 +0,0 @@
services:
laconicd:
restart: unless-stopped
image: cerc-io/laconicd:local-test
environment:
- TEST_REGISTRY_EXPIRY=true
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
- ../../init.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
healthcheck:
test: ["CMD", "curl", "-v", "http://127.0.0.1:6060"]
interval: 1s
timeout: 5s
retries: 30
ports:
- "6060"
- "26657"
- "26656"
- "9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"
sdk-test-runner:
image: cerc-io/laconic-sdk-tester:local-test
depends_on:
laconicd:
condition: service_healthy
command: tail -F /dev/null


@@ -1,10 +1,14 @@
 services:
   laconicd:
     restart: unless-stopped
-    image: cerc-io/laconicd:local-test
-    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
+    image: cerc/laconicd:local
+    entrypoint: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
+    environment:
+      - TEST_AUCTION_ENABLED
+      - TEST_REGISTRY_EXPIRY
+      - LOGLEVEL
     volumes:
       - ../../init.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
     healthcheck:
       test: ["CMD", "curl", "-v", "http://127.0.0.1:6060"]
       interval: 1s
@@ -22,7 +26,7 @@ services:
       - "1317"
 
   sdk-test-runner:
-    image: cerc-io/laconic-sdk-tester:local-test
+    image: cerc/laconic-sdk-tester:local
     depends_on:
       laconicd:
         condition: service_healthy
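
Because the remaining compose file now forwards TEST_AUCTION_ENABLED, TEST_REGISTRY_EXPIRY and LOGLEVEL into the laconicd container, the deleted auction and nameservice compose variants are covered by exporting the relevant toggle before bringing the stack up, e.g.:

    TEST_AUCTION_ENABLED=true docker compose up -d
    TEST_REGISTRY_EXPIRY=true docker compose up -d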


@@ -1,3 +0,0 @@
#!/bin/sh
exec "$@"


@@ -1,16 +0,0 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Get the key from laconicd
laconicd_key=$( docker compose exec laconicd echo y | docker compose exec laconicd laconicd keys export mykey --unarmored-hex --unsafe )
# Set parameters for the test suite
cosmos_chain_id=laconic_9000-1
laconicd_rest_endpoint=http://laconicd:1317
laconicd_gql_endpoint=http://laconicd:9473/api
# Run tests
docker network inspect sdk_tests_default
sleep 30s
docker logs sdk_tests-laconicd-1
docker compose exec sdk-test-runner sh -c "COSMOS_CHAIN_ID=${cosmos_chain_id} LACONICD_REST_ENDPOINT=${laconicd_rest_endpoint} LACONICD_GQL_ENDPOINT=${laconicd_gql_endpoint} PRIVATE_KEY=${laconicd_key} yarn test:auctions"


@@ -1,16 +0,0 @@
#!/usr/bin/env bash
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Get the key from laconicd
laconicd_key=$( docker compose exec laconicd echo y | docker compose exec laconicd laconicd keys export mykey --unarmored-hex --unsafe )
# Set parameters for the test suite
cosmos_chain_id=laconic_9000-1
laconicd_rest_endpoint=http://laconicd:1317
laconicd_gql_endpoint=http://laconicd:9473/api
# Run tests
docker network inspect sdk_tests_default
sleep 30s
docker logs sdk_tests-laconicd-1
docker compose exec sdk-test-runner sh -c "COSMOS_CHAIN_ID=${cosmos_chain_id} LACONICD_REST_ENDPOINT=${laconicd_rest_endpoint} LACONICD_GQL_ENDPOINT=${laconicd_gql_endpoint} PRIVATE_KEY=${laconicd_key} yarn test:nameservice-expiry"


@@ -1,18 +1,26 @@
 #!/usr/bin/env bash
 
+# Forwards all args to yarn on the sdk-test-runner container
+
 if [ -n "$CERC_SCRIPT_DEBUG" ]; then
   set -x
 fi
 
+yarn_args=("--inspect-brk=8888")
+yarn_args+=("${@:-test}")
+
 # Get the key from laconicd
-laconicd_key=$( docker compose exec laconicd echo y | docker compose exec laconicd laconicd keys export mykey --unarmored-hex --unsafe )
+laconicd_key=$(
+  yes | docker compose exec laconicd laconicd keys export mykey --unarmored-hex --unsafe
+)
 
 # Set parameters for the test suite
 cosmos_chain_id=laconic_9000-1
 laconicd_rest_endpoint=http://laconicd:1317
 laconicd_gql_endpoint=http://laconicd:9473/api
 
-# Run tests
-docker network inspect sdk_tests_default
-sleep 30s
-docker logs laconicd
-docker compose exec laconicd sh -c "curl http://127.0.0.1:9473/api"
-docker compose exec laconicd sh -c "curl http://localhost:9473/api"
-docker compose exec sdk-test-runner sh -c "COSMOS_CHAIN_ID=${cosmos_chain_id} LACONICD_REST_ENDPOINT=${laconicd_rest_endpoint} LACONICD_GQL_ENDPOINT=${laconicd_gql_endpoint} PRIVATE_KEY=${laconicd_key} yarn test"
+# Run tests
+docker compose exec \
+  -e COSMOS_CHAIN_ID="$cosmos_chain_id" \
+  -e LACONICD_REST_ENDPOINT="$laconicd_rest_endpoint" \
+  -e LACONICD_GQL_ENDPOINT="$laconicd_gql_endpoint" \
+  -e PRIVATE_KEY="$laconicd_key" \
+  sdk-test-runner yarn run "${yarn_args[@]}"
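
run-tests.sh now forwards its arguments to yarn inside the sdk-test-runner container, defaulting to the plain test script, so the per-suite wrapper scripts are no longer needed; usage is roughly:

    ./run-tests.sh                      # yarn test
    ./run-tests.sh test:auctions
    ./run-tests.sh test:nameservice-expiry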


@@ -519,6 +519,7 @@ func New(l Logger, baseDir string, cfg Config) (*Network, error) {
     l.Log("starting test network...")
     for _, v := range network.Validators {
+        l.Log("starting validator:", v.Moniker)
         err := startInProcess(cfg, v)
         if err != nil {
             return nil, err


@@ -72,10 +72,6 @@ $ %s tx %s set [payload file path] [bond-id]
             }
 
             msg := types.NewMsgSetRecord(payload, args[1], clientCtx.GetFromAddress())
-            err = msg.ValidateBasic()
-            if err != nil {
-                return err
-            }
             return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg)
         },
     }


@@ -42,7 +42,8 @@ func (s *IntegrationTestSuite) TestGRPCQueryParams() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            resp, _ := rest.GetRequest(tc.url)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -60,7 +61,7 @@ func (s *IntegrationTestSuite) TestGRPCQueryParams() {
     }
 }
 
-//nolint: all
+// nolint: all
 func (s *IntegrationTestSuite) TestGRPCQueryWhoIs() {
     val := s.network.Validators[0]
     sr := s.Require()
@@ -110,11 +111,11 @@ func (s *IntegrationTestSuite) TestGRPCQueryWhoIs() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(authorityName)
-                tc.url = fmt.Sprintf(tc.url, authorityName)
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun(authorityName)
+            tc.url = fmt.Sprintf(tc.url, authorityName)
+
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -131,7 +132,7 @@ func (s *IntegrationTestSuite) TestGRPCQueryWhoIs() {
 func (s *IntegrationTestSuite) TestGRPCQueryLookup() {
     val := s.network.Validators[0]
     sr := s.Require()
-    reqURL := val.APIAddress + "/vulcanize/registry/v1beta1/lookup?crn=%s"
+    reqURL := val.APIAddress + "/vulcanize/registry/v1beta1/lookup"
     authorityName := "QueryLookUp"
 
     testCases := []struct {
@@ -151,7 +152,7 @@ func (s *IntegrationTestSuite) TestGRPCQueryLookup() {
         },
         {
             "Success",
-            reqURL,
+            fmt.Sprintf(reqURL+"?crn=crn://%s/", authorityName),
             false,
             "",
             func(authorityName string) {
@@ -163,11 +164,9 @@ func (s *IntegrationTestSuite) TestGRPCQueryLookup() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(authorityName)
-                tc.url = fmt.Sprintf(reqURL, fmt.Sprintf("crn://%s/", authorityName))
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun(authorityName)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             if tc.expectErr {
                 sr.Contains(string(resp), tc.errorMsg)
             } else {
@@ -180,7 +179,7 @@ func (s *IntegrationTestSuite) TestGRPCQueryLookup() {
     }
 }
 
-//nolint: all
+// nolint: all
 func (s *IntegrationTestSuite) TestGRPCQueryRecordExpiryQueue() {
     val := s.network.Validators[0]
     sr := s.Require()
@@ -233,12 +232,11 @@ func (s *IntegrationTestSuite) TestGRPCQueryRecordExpiryQueue() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(s.bondID)
-            }
+            tc.preRun(s.bondID)
             // wait 12 seconds for records expires
             time.Sleep(time.Second * 12)
-            resp, _ := rest.GetRequest(tc.url)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -252,7 +250,7 @@
         })
     }
 }
-//nolint: all
+// nolint: all
 func (s *IntegrationTestSuite) TestGRPCQueryAuthorityExpiryQueue() {
     val := s.network.Validators[0]
     sr := s.Require()
@@ -303,13 +301,12 @@ func (s *IntegrationTestSuite) TestGRPCQueryAuthorityExpiryQueue() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun("QueryAuthorityExpiryQueue")
-            }
+            tc.preRun("QueryAuthorityExpiryQueue")
             // wait 12 seconds to name authorites expires
             time.Sleep(time.Second * 12)
-            resp, _ := rest.GetRequest(tc.url)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -324,7 +321,7 @@
         })
     }
 }
-//nolint: all
+// nolint: all
 func (s *IntegrationTestSuite) TestGRPCQueryListRecords() {
     val := s.network.Validators[0]
     sr := s.Require()
@@ -377,10 +374,9 @@ func (s *IntegrationTestSuite) TestGRPCQueryListRecords() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(s.bondID)
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun(s.bondID)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -443,12 +439,11 @@ func (s *IntegrationTestSuite) TestGRPCQueryGetRecordByID() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            var recordID string
-            if !tc.expectErr {
-                recordID = tc.preRun(s.bondID)
-                tc.url = fmt.Sprintf(reqURL, recordID)
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            recordID := tc.preRun(s.bondID)
+            tc.url = fmt.Sprintf(reqURL, recordID)
+
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -498,11 +493,11 @@ func (s *IntegrationTestSuite) TestGRPCQueryGetRecordByBondID() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(s.bondID)
-                tc.url = fmt.Sprintf(reqURL, s.bondID)
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun(s.bondID)
+            tc.url = fmt.Sprintf(reqURL, s.bondID)
+
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -552,10 +547,9 @@ func (s *IntegrationTestSuite) TestGRPCQueryGetRegistryModuleBalance() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun(s.bondID)
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun(s.bondID)
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -603,10 +597,9 @@ func (s *IntegrationTestSuite) TestGRPCQueryNamesList() {
     for _, tc := range testCases {
         s.Run(tc.name, func() {
-            if !tc.expectErr {
-                tc.preRun("ListNameRecords")
-            }
-            resp, _ := rest.GetRequest(tc.url)
+            tc.preRun("ListNameRecords")
+            resp, err := rest.GetRequest(tc.url)
+            s.NoError(err)
             require := s.Require()
             if tc.expectErr {
                 require.Contains(string(resp), tc.errorMsg)
@@ -643,5 +636,5 @@ func createRecord(bondID string, s *IntegrationTestSuite) {
     var d sdk.TxResponse
     err = val.ClientCtx.Codec.UnmarshalJSON(out.Bytes(), &d)
     sr.NoError(err)
-    sr.Zero(d.Code)
+    sr.Zero(d.Code, d.RawLog)
 }


@@ -538,9 +538,6 @@ func createNameRecord(authorityName string, s *IntegrationTestSuite) {
     sr.NoError(err)
     sr.Zero(d.Code)
 
-    // creating the bond
-    CreateBond(s)
-
     // Get the bond-id
     bondID := GetBondID(s)


@@ -101,42 +101,23 @@ func CreateBond(s *IntegrationTestSuite) {
     val := s.network.Validators[0]
     sr := s.Require()
 
-    testCases := []struct {
-        name string
-        args []string
-        err  bool
-    }{
-        {
-            "create bond",
-            []string{
-                fmt.Sprintf("100000000000%s", s.cfg.BondDenom),
-                fmt.Sprintf("--%s=%s", flags.FlagFrom, accountName),
-                fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
-                fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
-                fmt.Sprintf("--%s=json", tmcli.OutputFlag),
-                fmt.Sprintf("--%s=%s", flags.FlagFees, fmt.Sprintf("3%s", s.cfg.BondDenom)),
-            },
-            false,
-        },
+    clientCtx := val.ClientCtx
+    cmd := bondcli.NewCreateBondCmd()
+    args := []string{
+        fmt.Sprintf("100000000000%s", s.cfg.BondDenom),
+        fmt.Sprintf("--%s=%s", flags.FlagFrom, accountName),
+        fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
+        fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
+        fmt.Sprintf("--%s=json", tmcli.OutputFlag),
+        fmt.Sprintf("--%s=%s", flags.FlagFees, fmt.Sprintf("3%s", s.cfg.BondDenom)),
     }
+    out, err := clitestutil.ExecTestCLICmd(clientCtx, cmd, args)
+    sr.NoError(err)
 
-    for _, tc := range testCases {
-        s.Run(fmt.Sprintf("Case %s", tc.name), func() {
-            clientCtx := val.ClientCtx
-            cmd := bondcli.NewCreateBondCmd()
-
-            out, err := clitestutil.ExecTestCLICmd(clientCtx, cmd, tc.args)
-            if tc.err {
-                sr.Error(err)
-            } else {
-                sr.NoError(err)
-                var d sdk.TxResponse
-                err = val.ClientCtx.Codec.UnmarshalJSON(out.Bytes(), &d)
-                sr.NoError(err)
-                sr.Zero(d.Code)
-            }
-        })
-    }
+    var d sdk.TxResponse
+    err = val.ClientCtx.Codec.UnmarshalJSON(out.Bytes(), &d)
+    sr.NoError(err)
+    sr.Zero(d.Code)
 }
 
 func GetBondID(s *IntegrationTestSuite) string {
@@ -160,6 +141,11 @@ func (s *IntegrationTestSuite) TestGetCmdSetRecord() {
     val := s.network.Validators[0]
     sr := s.Require()
 
+    bondID := GetBondID(s)
+    dir, err := os.Getwd()
+    sr.NoError(err)
+    payloadPath := dir + "/service_provider_example.yml"
+
     testCases := []struct {
         name string
         args []string
@@ -179,6 +165,7 @@ func (s *IntegrationTestSuite) TestGetCmdSetRecord() {
         {
             "success",
             []string{
+                payloadPath, bondID,
                 fmt.Sprintf("--%s=%s", flags.FlagFrom, accountName),
                 fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
                 fmt.Sprintf("--%s=json", tmcli.OutputFlag),
@@ -191,17 +178,6 @@ func (s *IntegrationTestSuite) TestGetCmdSetRecord() {
     for _, tc := range testCases {
         s.Run(fmt.Sprintf("Case %s", tc.name), func() {
-            if !tc.err {
-                // create the bond
-                CreateBond(s)
-                // get the bond id from bond list
-                bondID := GetBondID(s)
-                dir, err := os.Getwd()
-                sr.NoError(err)
-                payloadPath := dir + "/service_provider_example.yml"
-                tc.args = append([]string{payloadPath, bondID}, tc.args...)
-            }
             clientCtx := val.ClientCtx
             cmd := cli.GetCmdSetRecord()
@@ -341,9 +317,6 @@ func (s *IntegrationTestSuite) TestGetCmdSetName() {
     sr.NoError(err)
     sr.Zero(d.Code)
 
-    // creating the bond
-    CreateBond(s)
-
     // Get the bond-id
     bondID := GetBondID(s)
@@ -369,9 +342,7 @@ func (s *IntegrationTestSuite) TestGetCmdSetName() {
     for _, tc := range testCases {
         s.Run(fmt.Sprintf("Case %s", tc.name), func() {
-            if !tc.err {
-                tc.preRun(authorityName)
-            }
+            tc.preRun(authorityName)
             clientCtx := val.ClientCtx
             cmd := cli.GetCmdSetName()
@@ -394,6 +365,7 @@ func (s *IntegrationTestSuite) TestGetCmdSetAuthorityBond() {
     val := s.network.Validators[0]
     sr := s.Require()
     authorityName := "TestGetCmdSetAuthorityBond"
+    bondID := GetBondID(s)
 
     testCases := []struct {
         name string
@@ -417,6 +389,7 @@ func (s *IntegrationTestSuite) TestGetCmdSetAuthorityBond() {
         {
             "success with name and bond-id",
             []string{
+                authorityName, bondID,
                 fmt.Sprintf("--%s=%s", flags.FlagFrom, accountName),
                 fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
                 fmt.Sprintf("--%s=json", tmcli.OutputFlag),
@@ -449,15 +422,7 @@ func (s *IntegrationTestSuite) TestGetCmdSetAuthorityBond() {
     for _, tc := range testCases {
        s.Run(fmt.Sprintf("Case %s", tc.name), func() {
-            if !tc.err {
-                // reserve the name
-                tc.preRun(authorityName)
-                // creating the bond
-                CreateBond(s)
-                // getting the bond-id
-                bondID := GetBondID(s)
-                tc.args = append([]string{authorityName, bondID}, tc.args...)
-            }
+            tc.preRun(authorityName)
             clientCtx := val.ClientCtx
             cmd := cli.GetCmdSetAuthorityBond()
@@ -501,6 +466,7 @@ func (s *IntegrationTestSuite) TestGetCmdDeleteName() {
         {
             "successfully delete name",
             []string{
+                fmt.Sprintf("crn://%s/", authorityName),
                 fmt.Sprintf("--%s=%s", flags.FlagFrom, accountName),
                 fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
                 fmt.Sprintf("--%s=json", tmcli.OutputFlag),
@@ -516,10 +482,8 @@ func (s *IntegrationTestSuite) TestGetCmdDeleteName() {
     for _, tc := range testCasesForDeletingName {
         s.Run(fmt.Sprintf("Case %s", tc.name), func() {
-            if !tc.err {
-                tc.preRun(authorityName, s)
-                tc.args = append([]string{fmt.Sprintf("crn://%s/", authorityName)}, tc.args...)
-            }
+            tc.preRun(authorityName, s)
             clientCtx := val.ClientCtx
             cmd := cli.GetCmdDeleteName()
@@ -574,8 +538,6 @@ func (s *IntegrationTestSuite) TestGetCmdDissociateBond() {
             },
             false,
             func(s *IntegrationTestSuite) string {
-                // create the bond
-                CreateBond(s)
                 // get the bond id from bond list
                 bondID := GetBondID(s)
                 dir, err := os.Getwd()
@@ -816,8 +778,6 @@ func (s *IntegrationTestSuite) TestGetCmdAssociateBond() {
             },
             false,
             func(s *IntegrationTestSuite) (string, string) {
-                // create the bond
-                CreateBond(s)
                 // get the bond id from bond list
                 bondID := GetBondID(s)
                 dir, err := os.Getwd()


@@ -256,13 +256,13 @@ func (k Keeper) ProcessSetRecord(ctx sdk.Context, msg types.MsgSetRecord) (*type
         pubKey, err := legacy.PubKeyFromBytes(helpers.BytesFromBase64(sig.PubKey))
         if err != nil {
             fmt.Println("Error decoding pubKey from bytes: ", err)
-            return nil, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "Invalid public key.")
+            return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "Invalid public key: %s", err)
         }
 
         sigOK := pubKey.VerifySignature(resourceSignBytes, helpers.BytesFromBase64(sig.Sig))
         if !sigOK {
             fmt.Println("Signature mismatch: ", sig.PubKey)
-            return nil, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "Invalid signature.")
+            return nil, errorsmod.Wrapf(sdkerrors.ErrUnauthorized, "Invalid signature: %s", sig.PubKey)
         }
         record.Owners = append(record.Owners, pubKey.Address().String())
     }
@@ -389,7 +389,7 @@ func (k Keeper) GetAttributeMapping(ctx sdk.Context, key []byte) ([]string, erro
     store := ctx.KVStore(k.storeKey)
     if !store.Has(key) {
-        return nil, fmt.Errorf("store doesn't have key")
+        return nil, nil
     }
 
     var recordIds []string


@@ -25,11 +25,7 @@ func (m msgServer) SetRecord(c context.Context, msg *types.MsgSetRecord) (*types
         return nil, err
     }
 
-    record, err := m.Keeper.ProcessSetRecord(ctx, types.MsgSetRecord{
-        BondId:  msg.GetBondId(),
-        Signer:  msg.GetSigner(),
-        Payload: msg.GetPayload(),
-    })
+    record, err := m.Keeper.ProcessSetRecord(ctx, *msg)
     if err != nil {
         return nil, err
     }
@@ -51,7 +47,7 @@ func (m msgServer) SetRecord(c context.Context, msg *types.MsgSetRecord) (*types
     return &types.MsgSetRecordResponse{Id: record.ID}, nil
 }
 
-//nolint: all
+// nolint: all
 func (m msgServer) SetName(c context.Context, msg *types.MsgSetName) (*types.MsgSetNameResponse, error) {
     ctx := sdk.UnwrapSDKContext(c)
     _, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -108,7 +104,7 @@ func (m msgServer) ReserveName(c context.Context, msg *types.MsgReserveAuthority
     return &types.MsgReserveAuthorityResponse{}, nil
 }
 
-//nolint: all
+// nolint: all
 func (m msgServer) SetAuthorityBond(c context.Context, msg *types.MsgSetAuthorityBond) (*types.MsgSetAuthorityBondResponse, error) {
     ctx := sdk.UnwrapSDKContext(c)
     _, err := sdk.AccAddressFromBech32(msg.Signer)
@@ -185,7 +181,7 @@ func (m msgServer) RenewRecord(c context.Context, msg *types.MsgRenewRecord) (*t
     return &types.MsgRenewRecordResponse{}, nil
 }
 
-//nolint: all
+// nolint: all
 func (m msgServer) AssociateBond(c context.Context, msg *types.MsgAssociateBond) (*types.MsgAssociateBondResponse, error) {
     ctx := sdk.UnwrapSDKContext(c)
     _, err := sdk.AccAddressFromBech32(msg.Signer)