Mainnet laconic setup (#510)
parent f55a14bd6c
commit 7797185d07

app/command_types.py (new file, +27)
@@ -0,0 +1,27 @@
+# Copyright © 2023 Cerc
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http:#www.gnu.org/licenses/>.
+
+from dataclasses import dataclass
+
+
+@dataclass
+class CommandOptions:
+    stack: str
+    quiet: bool = False
+    verbose: bool = False
+    dry_run: bool = False
+    local_stack: bool = False
+    debug: bool = False
+    continue_on_error: bool = False
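The CommandOptions dataclass replaces the Options class previously defined inline in cli.py (see the cli.py hunk further down). A minimal sketch of how it is used, assuming app.command_types is importable; the flag values here are examples only:

from app.command_types import CommandOptions

# Build the options object the way cli.py now does; only "stack" is required
options = CommandOptions(stack="mainnet-laconic", verbose=True, debug=True)
if options.debug:
    print(f"deploying stack: {options.stack}")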

@@ -2,14 +2,16 @@ services:
   laconicd:
     restart: no
     image: cerc/laconicd:local
-    command: ["/bin/sh", "-c", "while :; do sleep 600; done"]
+    command: ["/bin/sh", "-c", "/opt/run-laconicd.sh"]
     volumes:
       # The cosmos-sdk node's database directory:
       - laconicd-data:/root/.laconicd/data
+      - laconicd-config:/root/.laconicd/config
+      - laconicd-keyring:/root/.laconicd/keyring-test
       # TODO: look at folding these scripts into the container
-      - ../config/mainnet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
-      - ../config/mainnet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
-      - ../config/mainnet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
+      - ../config/mainnet-laconicd/scripts/run-laconicd.sh:/opt/run-laconicd.sh
+      - ../config/mainnet-laconicd/scripts/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
+      - ../config/mainnet-laconicd/scripts/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
       # TODO: determine which of the ports below is really needed
     ports:
       - "6060"
@@ -28,3 +30,5 @@ services:
 
 volumes:
   laconicd-data:
+  laconicd-config:
+  laconicd-keyring:

@@ -1,118 +0,0 @@
-#!/bin/bash
-
-# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
-# so we should have a mechanism to bundle it inside the container rather than link from here
-# at deploy time.
-
-KEY="mykey"
-CHAINID="laconic_9000-1"
-MONIKER="localtestnet"
-KEYRING="test"
-KEYALGO="eth_secp256k1"
-LOGLEVEL="info"
-# trace evm
-TRACE="--trace"
-# TRACE=""
-
-# validate dependencies are installed
-command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
-
-# remove existing daemon and client
-rm -rf ~/.laconic*
-
-make install
-
-laconicd config keyring-backend $KEYRING
-laconicd config chain-id $CHAINID
-
-# if $KEY exists it should be deleted
-laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
-
-# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
-laconicd init $MONIKER --chain-id $CHAINID
-
-# Change parameter token denominations to aphoton
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-# Custom modules
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-
-if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
-  echo "Setting timers for expiry tests."
-
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-fi
-
-if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
-  echo "Enabling auction and setting timers."
-
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-  cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-fi
-
-# increase block time (?)
-cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-
-# Set gas limit in genesis
-cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
-
-# disable produce empty block
-if [[ "$OSTYPE" == "darwin"* ]]; then
-  sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
-else
-  sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
-fi
-
-if [[ $1 == "pending" ]]; then
-  if [[ "$OSTYPE" == "darwin"* ]]; then
-    sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-    sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-  else
-    sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-    sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
-  fi
-fi
-
-# Allocate genesis accounts (cosmos formatted addresses)
-laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
-
-# Sign genesis transaction
-laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
-
-# Collect genesis tx
-laconicd collect-gentxs
-
-# Run this to ensure everything worked and that the genesis file is setup correctly
-laconicd validate-genesis
-
-if [[ $1 == "pending" ]]; then
-  echo "pending mode is on, please wait for the first block committed."
-fi
-
-# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
-laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground

app/data/config/mainnet-laconicd/scripts/run-laconicd.sh (new executable file, +18)
@@ -0,0 +1,18 @@
+#!/bin/sh
+if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
+    set -x
+fi
+
+#TODO: pass these in from the caller
+TRACE="--trace"
+LOGLEVEL="info"
+
+laconicd start \
+  --pruning=nothing \
+  --evm.tracer=json $TRACE \
+  --log_level $LOGLEVEL \
+  --minimum-gas-prices=0.0001aphoton \
+  --json-rpc.api eth,txpool,personal,net,debug,web3,miner \
+  --api.enable \
+  --gql-server \
+  --gql-playground

@@ -14,9 +14,18 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from app.util import get_yaml
-from app.deploy_types import DeployCommandContext, DeploymentContext
+from app.deploy_types import DeployCommandContext, LaconicStackSetupCommand
 from app.stack_state import State
 from app.deploy_util import VolumeMapping, run_container_command
+from app.command_types import CommandOptions
+from enum import Enum
+from pathlib import Path
+from shutil import copyfile, copytree
+import json
+import os
+import sys
+import tomli
+import re
 
 default_spec_file_content = """config:
     node_moniker: my-node-name
@@ -27,13 +36,242 @@ init_help_text = """Add helpful text here on setting config variables.
 """
 
 
-def setup(command_context: DeployCommandContext):
-    node_moniker = "dbdb-node"
-    chain_id = "laconic_81337-1"
+class SetupPhase(Enum):
+    INITIALIZE = 1
+    JOIN = 2
+    CREATE = 3
+    ILLEGAL = 3
+
+
+def _client_toml_path(network_dir: Path):
+    return network_dir.joinpath("config", "client.toml")
+
+
+def _config_toml_path(network_dir: Path):
+    return network_dir.joinpath("config", "config.toml")
+
+
+def _get_chain_id_from_config(network_dir: Path):
+    chain_id = None
+    with open(_client_toml_path(network_dir), "rb") as f:
+        toml_dict = tomli.load(f)
+        chain_id = toml_dict["chain-id"]
+    return chain_id
+
+
+def _get_node_moniker_from_config(network_dir: Path):
+    moniker = None
+    with open(_client_toml_path(network_dir), "rb") as f:
+        toml_dict = tomli.load(f)
+        moniker = toml_dict["moniker"]
+    return moniker
+
+
+def _get_node_key_from_gentx(options: CommandOptions, gentx_file_name: str):
+    gentx_file_path = Path(gentx_file_name)
+    if gentx_file_path.exists():
+        with open(Path(gentx_file_name), "rb") as f:
+            parsed_json = json.load(f)
+            return parsed_json['body']['messages'][0]['delegator_address']
+    else:
+        print(f"Error: gentx file: {gentx_file_name} does not exist")
+        sys.exit(1)
+
+
+def _comma_delimited_to_list(list_str: str):
+    return list_str.split(",") if list_str else []
+
+
+def _get_node_keys_from_gentx_files(options: CommandOptions, gentx_file_list: str):
+    node_keys = []
+    gentx_files = _comma_delimited_to_list(gentx_file_list)
+    for gentx_file in gentx_files:
+        node_key = _get_node_key_from_gentx(options, gentx_file)
+        if node_key:
+            node_keys.append(node_key)
+    return node_keys
+
+
+def _copy_gentx_files(options: CommandOptions, network_dir: Path, gentx_file_list: str):
+    gentx_files = _comma_delimited_to_list(gentx_file_list)
+    for gentx_file in gentx_files:
+        gentx_file_path = Path(gentx_file)
+        copyfile(gentx_file_path, os.path.join(network_dir, "config", "gentx", os.path.basename(gentx_file_path)))
+
+
+def _remove_persistent_peers(options: CommandOptions, network_dir: Path):
+    config_file_path = _config_toml_path(network_dir)
+    if not config_file_path.exists():
+        print("Error: config.toml not found")
+        sys.exit(1)
+    with open(config_file_path, "r") as input_file:
+        config_file_content = input_file.read()
+    persistent_peers_pattern = '^persistent_peers = "(.+?)"'
+    replace_with = "persistent_peers = \"\""
+    config_file_content = re.sub(persistent_peers_pattern, replace_with, config_file_content, flags=re.MULTILINE)
+    with open(config_file_path, "w") as output_file:
+        output_file.write(config_file_content)
+
+
+def setup(command_context: DeployCommandContext, parameters: LaconicStackSetupCommand, extra_args):
+
+    options = command_context.cluster_context.options
+
+    currency = "stake" # Does this need to be a parameter?
+
+    if options.debug:
+        print(f"parameters: {parameters}")
+
+    phase = SetupPhase.ILLEGAL
+
+    if parameters.initialize_network:
+        if parameters.join_network or parameters.create_network:
+            print("Can't supply --join-network or --create-network with --initialize-network")
+            sys.exit(1)
+        if not parameters.chain_id:
+            print("--chain-id is required")
+            sys.exit(1)
+        # node_moniker must be supplied
+        if not parameters.node_moniker:
+            print("Error: --node-moniker is required")
+            sys.exit(1)
+        phase = SetupPhase.INITIALIZE
+    elif parameters.join_network:
+        if parameters.initialize_network or parameters.create_network:
+            print("Can't supply --initialize-network or --create-network with --join-network")
+            sys.exit(1)
+        phase = SetupPhase.JOIN
+    elif parameters.create_network:
+        if parameters.initialize_network or parameters.join_network:
+            print("Can't supply --initialize-network or --join-network with --create-network")
+            sys.exit(1)
+        phase = SetupPhase.CREATE
+
+    network_dir = Path(parameters.network_dir).absolute()
+    laconicd_home_path_in_container = "/laconicd-home"
     mounts = [
-        VolumeMapping("./path", "~/.laconicd")
+        VolumeMapping(network_dir, laconicd_home_path_in_container)
     ]
-    output, status = run_container_command(command_context.cluster_context, "laconicd", f"laconicd init {node_moniker} --chain-id {chain_id}", mounts)
+
+    if phase == SetupPhase.INITIALIZE:
+
+        # We want to create the directory so if it exists that's an error
+        if os.path.exists(network_dir):
+            print(f"Error: network directory {network_dir} already exists")
+            sys.exit(1)
+
+        os.mkdir(network_dir)
+
+        output, status = run_container_command(
+            command_context,
+            "laconicd", f"laconicd init {parameters.node_moniker} --home {laconicd_home_path_in_container}\
+            --chain-id {parameters.chain_id}", mounts)
+        if options.debug:
+            print(f"Command output: {output}")
+
+    elif phase == SetupPhase.JOIN:
+        if not os.path.exists(network_dir):
+            print(f"Error: network directory {network_dir} doesn't exist")
+            sys.exit(1)
+        # Get the chain_id from the config file created in the INITIALIZE phase
+        chain_id = _get_chain_id_from_config(network_dir)
+
+        output1, status1 = run_container_command(
+            command_context, "laconicd", f"laconicd keys add {parameters.key_name} --home {laconicd_home_path_in_container}\
+            --keyring-backend test", mounts)
+        if options.debug:
+            print(f"Command output: {output1}")
+        output2, status2 = run_container_command(
+            command_context,
+            "laconicd",
+            f"laconicd add-genesis-account {parameters.key_name} 12900000000000000000000{currency}\
+            --home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts)
+        print(f"Command output: {output2}")
+        output3, status3 = run_container_command(
+            command_context,
+            "laconicd",
+            f"laconicd gentx {parameters.key_name} 90000000000{currency} --home {laconicd_home_path_in_container}\
+            --chain-id {chain_id} --keyring-backend test",
+            mounts)
+        print(f"Command output: {output3}")
+        output4, status4 = run_container_command(
+            command_context,
+            "laconicd",
+            f"laconicd keys show {parameters.key_name} -a --home {laconicd_home_path_in_container} --keyring-backend test",
+            mounts)
+        print(f"Command output: {output4}")
+
+    elif phase == SetupPhase.CREATE:
+        if not os.path.exists(network_dir):
+            print(f"Error: network directory {network_dir} doesn't exist")
+            sys.exit(1)
+
+        # In the CREATE phase, we are either a "coordinator" node, generating the genesis.json file ourselves
+        # OR we are a "not-coordinator" node, consuming a genesis file we got from the coordinator node.
+        if parameters.genesis_file:
+            # We got the genesis file from elsewhere
+            # Copy it into our network dir
+            genesis_file_path = Path(parameters.genesis_file)
+            if not os.path.exists(genesis_file_path):
+                print(f"Error: supplied genesis file: {parameters.genesis_file} does not exist.")
+                sys.exit(1)
+            copyfile(genesis_file_path, os.path.join(network_dir, "config", os.path.basename(genesis_file_path)))
+        else:
+            # We're generating the genesis file
+            # First look in the supplied gentx files for the other nodes' keys
+            other_node_keys = _get_node_keys_from_gentx_files(options, parameters.gentx_file_list)
+            # Add those keys to our genesis, with balances we determine here (why?)
+            for other_node_key in other_node_keys:
+                outputk, statusk = run_container_command(
+                    command_context, "laconicd", f"laconicd add-genesis-account {other_node_key} 12900000000000000000000{currency}\
+                    --home {laconicd_home_path_in_container} --keyring-backend test", mounts)
+                print(f"Command output: {outputk}")
+            # Copy the gentx json files into our network dir
+            _copy_gentx_files(options, network_dir, parameters.gentx_file_list)
+            # Now we can run collect-gentxs
+            output1, status1 = run_container_command(
+                command_context, "laconicd", f"laconicd collect-gentxs --home {laconicd_home_path_in_container}", mounts)
+            print(f"Command output: {output1}")
+            print(f"Generated genesis file, please copy to other nodes as required: \
+                {os.path.join(network_dir, 'config', 'genesis.json')}")
+            # Last thing, collect-gentxs puts a likely bogus set of persistent_peers in config.toml so we remove that now
+            _remove_persistent_peers(options, network_dir)
+        # In both cases we validate the genesis file now
+        output2, status1 = run_container_command(
+            command_context, "laconicd", f"laconicd validate-genesis --home {laconicd_home_path_in_container}", mounts)
+        print(f"Command output: {output2}")
+
+    else:
+        print("Illegal parameters supplied")
+        sys.exit(1)
+
+
+def create(command_context: DeployCommandContext, extra_args):
+    network_dir = extra_args
+    if network_dir is None:
+        print("Error: --network-dir must be supplied")
+        sys.exit(1)
+    network_dir_path = Path(network_dir)
+    if not (network_dir_path.exists() and network_dir_path.is_dir()):
+        print(f"Error: supplied network directory does not exist: {network_dir}")
+        sys.exit(1)
+    config_dir_path = network_dir_path.joinpath("config")
+    if not (config_dir_path.exists() and config_dir_path.is_dir()):
+        print(f"Error: supplied network directory does not contain a config directory: {config_dir_path}")
+        sys.exit(1)
+    data_dir_path = network_dir_path.joinpath("data")
+    if not (data_dir_path.exists() and data_dir_path.is_dir()):
+        print(f"Error: supplied network directory does not contain a data directory: {data_dir_path}")
+        sys.exit(1)
+    # Copy the network directory contents into our deployment
+    # TODO: change this to work with non local paths
+    deployment_config_dir = command_context.deployment_dir.joinpath("data", "laconicd-config")
+    copytree(config_dir_path, deployment_config_dir, dirs_exist_ok=True)
+    # Copy the data directory contents into our deployment
+    # TODO: change this to work with non local paths
+    deployment_data_dir = command_context.deployment_dir.joinpath("data", "laconicd-data")
+    copytree(data_dir_path, deployment_data_dir, dirs_exist_ok=True)
 
 
 def init(command_context: DeployCommandContext):
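One behaviour worth noting from the setup() code above: after collect-gentxs runs on the coordinator node, config.toml is left with a persistent_peers value that is likely bogus for the real network, so _remove_persistent_peers() blanks it with a regex. A small illustrative sketch of that substitution (the peer string below is made up):

import re

sample = 'persistent_peers = "d0c5...@192.168.1.10:26656"'
cleaned = re.sub('^persistent_peers = "(.+?)"', 'persistent_peers = ""', sample, flags=re.MULTILINE)
print(cleaned)  # prints: persistent_peers = ""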

@@ -14,10 +14,9 @@
 # along with this program. If not, see <http:#www.gnu.org/licenses/>.
 
 from app.util import get_yaml
-from app.deploy_types import DeployCommandContext, DeploymentContext
+from app.deploy_types import DeployCommandContext
 from app.stack_state import State
 from app.deploy_util import VolumeMapping, run_container_command
-import os
 from pathlib import Path
 
 default_spec_file_content = """config:
@@ -27,9 +26,10 @@ default_spec_file_content = """config:
 init_help_text = """Add helpful text here on setting config variables.
 """
 
+
 # Output a known string to a know file in the bind mounted directory ./container-output-dir
 # for test purposes -- test checks that the file was written.
-def setup(command_context: DeployCommandContext, extra_args):
+def setup(command_context: DeployCommandContext, parameters, extra_args):
     host_directory = "./container-output-dir"
     host_directory_absolute = Path(extra_args[0]).absolute().joinpath(host_directory)
     host_directory_absolute.mkdir(parents=True, exist_ok=True)
@@ -45,7 +45,7 @@ def init(command_context: DeployCommandContext):
     return yaml.load(default_spec_file_content)
 
 
-def create(command_context: DeployCommandContext):
+def create(command_context: DeployCommandContext, extra_args):
     data = "create-command-output-data"
     output_file_path = command_context.deployment_dir.joinpath("create-file")
     with open(output_file_path, 'w+') as output_file:

@@ -312,7 +312,7 @@ def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
     if ctx.verbose:
         print(f"files: {compose_files}")
 
-    return ClusterContext(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
+    return ClusterContext(ctx, cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
 
 
 def _convert_to_new_format(old_pod_array):

@@ -17,9 +17,11 @@ from typing import List
 from dataclasses import dataclass
 from pathlib import Path
 from python_on_whales import DockerClient
+from app.command_types import CommandOptions
 
 @dataclass
 class ClusterContext:
+    options: CommandOptions # TODO: this should be in its own object not stuffed in here
     cluster: str
     compose_files: List[str]
     pre_start_commands: List[str]
@@ -45,3 +47,21 @@ class DeploymentContext:
 class VolumeMapping:
     host_path: str
     container_path: str
+
+
+@dataclass
+class LaconicStackSetupCommand:
+    chain_id: str
+    node_moniker: str
+    key_name: str
+    initialize_network: bool
+    join_network: bool
+    create_network: bool
+    gentx_file_list: str
+    genesis_file: str
+    network_dir: str
+
+
+@dataclass
+class LaconicStackCreateCommand:
+    network_dir: str
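The new deploy setup click command (in the command-definition hunk further down) constructs LaconicStackSetupCommand positionally from the CLI flags. A hedged sketch of the object an --initialize-network invocation would produce, with example values borrowed from the test script at the end of this commit (options not supplied on the command line arrive as None from click):

from app.deploy_types import LaconicStackSetupCommand

params = LaconicStackSetupCommand(
    chain_id="laconic_81337-6",
    node_moniker="node1",
    key_name=None,
    initialize_network=True,
    join_network=False,
    create_network=False,
    gentx_file_list=None,
    genesis_file=None,
    network_dir="laconic-network-dir1")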

@@ -15,12 +15,11 @@
 
 import os
 from typing import List
-from dataclasses import dataclass
 from app.deploy_types import DeployCommandContext, VolumeMapping
 from app.util import get_parsed_stack_config, get_yaml, get_compose_file_dir
 
 
-def _container_image_from_service(stack:str, service: str):
+def _container_image_from_service(stack: str, service: str):
     # Parse the compose files looking for the image name of the specified service
     image_name = None
     parsed_stack = get_parsed_stack_config(stack)
@@ -39,7 +38,7 @@ def _container_image_from_service(stack:str, service: str):
 
 
 def _volumes_to_docker(mounts: List[VolumeMapping]):
     # Example from doc: [("/", "/host"), ("/etc/hosts", "/etc/hosts", "rw")]
     result = []
     for mount in mounts:
         docker_volume = (mount.host_path, mount.container_path)
@@ -51,6 +50,13 @@ def run_container_command(ctx: DeployCommandContext, service: str, command: str,
     docker = ctx.docker
     container_image = _container_image_from_service(ctx.stack, service)
     docker_volumes = _volumes_to_docker(mounts)
-    docker_output = docker.run(container_image, ["-c", command], entrypoint="bash", volumes=docker_volumes)
+    if ctx.cluster_context.options.debug:
+        print(f"Running this command in {service} container: {command}")
+    docker_output = docker.run(
+        container_image,
+        ["-c", command], entrypoint="sh",
+        user=f"{os.getuid()}:{os.getgid()}",
+        volumes=docker_volumes
+    )
     # There doesn't seem to be a way to get an exit code from docker.run()
     return (docker_output, 0)
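A note on the revised docker.run() call above: the command now runs as the invoking user's uid:gid instead of the container default, which (an inference from the diff, not stated in the commit) keeps files written into the bind-mounted network directory owned by the caller rather than root. The value passed as user= is simply:

import os

# What run_container_command now passes to docker.run(user=...)
print(f"{os.getuid()}:{os.getgid()}")  # e.g. "1000:1000"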

@@ -19,8 +19,9 @@ import os
 from pathlib import Path
 from shutil import copyfile, copytree
 import sys
-from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml, get_compose_file_dir
-from app.deploy_types import DeploymentContext, DeployCommandContext
+from app.util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options, get_yaml
+from app.util import get_compose_file_dir
+from app.deploy_types import DeploymentContext, LaconicStackSetupCommand
 
 
 def _make_default_deployment_dir():
@@ -118,7 +119,7 @@ def call_stack_deploy_init(deploy_command_context):
 
 
 # TODO: fold this with function above
-def call_stack_deploy_setup(deploy_command_context, extra_args):
+def call_stack_deploy_setup(deploy_command_context, parameters: LaconicStackSetupCommand, extra_args):
     # Link with the python file in the stack
     # Call a function in it
     # If no function found, return None
@@ -127,13 +128,13 @@ def call_stack_deploy_setup(deploy_command_context, extra_args):
         spec = util.spec_from_file_location("commands", python_file_path)
         imported_stack = util.module_from_spec(spec)
         spec.loader.exec_module(imported_stack)
-        return imported_stack.setup(deploy_command_context, extra_args)
+        return imported_stack.setup(deploy_command_context, parameters, extra_args)
     else:
         return None
 
 
 # TODO: fold this with function above
-def call_stack_deploy_create(deployment_context):
+def call_stack_deploy_create(deployment_context, extra_args):
     # Link with the python file in the stack
     # Call a function in it
     # If no function found, return None
@@ -142,7 +143,7 @@ def call_stack_deploy_create(deployment_context):
         spec = util.spec_from_file_location("commands", python_file_path)
         imported_stack = util.module_from_spec(spec)
         spec.loader.exec_module(imported_stack)
-        return imported_stack.create(deployment_context)
+        return imported_stack.create(deployment_context, extra_args)
     else:
         return None
 
@@ -197,8 +198,10 @@ def init(ctx, output):
 @click.command()
 @click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
 @click.option("--deployment-dir", help="Create deployment files in this directory")
+# TODO: Hack
+@click.option("--network-dir", help="Network configuration supplied in this directory")
 @click.pass_context
-def create(ctx, spec_file, deployment_dir):
+def create(ctx, spec_file, deployment_dir, network_dir):
     # This function fails with a useful error message if the file doens't exist
     parsed_spec = get_parsed_deployment_spec(spec_file)
     stack_name = parsed_spec['stack']
@@ -246,16 +249,26 @@ def create(ctx, spec_file, deployment_dir):
     deployment_command_context = ctx.obj
     deployment_command_context.stack = stack_name
     deployment_context = DeploymentContext(Path(deployment_dir), deployment_command_context)
-    call_stack_deploy_create(deployment_context)
+    call_stack_deploy_create(deployment_context, network_dir)
 
 
+# TODO: this code should be in the stack .py files but
+# we haven't yet figured out how to integrate click across
+# the plugin boundary
 @click.command()
-@click.option("--node-moniker", help="Help goes here")
-@click.option("--key-name", help="Help goes here")
-@click.option("--initialize-network", is_flag=True, default=False, help="Help goes here")
-@click.option("--join-network", is_flag=True, default=False, help="Help goes here")
-@click.option("--create-network", is_flag=True, default=False, help="Help goes here")
+@click.option("--node-moniker", help="Moniker for this node")
+@click.option("--chain-id", help="The new chain id")
+@click.option("--key-name", help="Name for new node key")
+@click.option("--gentx-files", help="List of comma-delimited gentx filenames from other nodes")
+@click.option("--genesis-file", help="Genesis file for the network")
+@click.option("--initialize-network", is_flag=True, default=False, help="Initialize phase")
+@click.option("--join-network", is_flag=True, default=False, help="Join phase")
+@click.option("--create-network", is_flag=True, default=False, help="Create phase")
+@click.option("--network-dir", help="Directory for network files")
 @click.argument('extra_args', nargs=-1)
 @click.pass_context
-def setup(ctx, node_moniker, key_name, initialize_network, join_network, create_network, extra_args):
-    call_stack_deploy_setup(ctx.obj, extra_args)
+def setup(ctx, node_moniker, chain_id, key_name, gentx_files, genesis_file, initialize_network, join_network, create_network,
+          network_dir, extra_args):
+    parmeters = LaconicStackSetupCommand(chain_id, node_moniker, key_name, initialize_network, join_network, create_network,
+                                         gentx_files, genesis_file, network_dir)
+    call_stack_deploy_setup(ctx.obj, parmeters, extra_args)

@@ -15,6 +15,7 @@
 
 from enum import Enum
 
+
 class State(Enum):
     CREATED = 1
     CONFIGURED = 2

cli.py (14 lines changed)
@@ -16,6 +16,7 @@
 import click
 from dataclasses import dataclass
 
+from app.command_types import CommandOptions
 from app import setup_repositories
 from app import build_containers
 from app import build_npms
@@ -26,17 +27,6 @@ from app import deployment
 CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
 
 
-@dataclass
-class Options:
-    stack: str
-    quiet: bool = False
-    verbose: bool = False
-    dry_run: bool = False
-    local_stack: bool = False
-    debug: bool = False
-    continue_on_error: bool = False
-
-
 @click.group(context_settings=CONTEXT_SETTINGS)
 @click.option('--stack', help="specify a stack to build/deploy")
 @click.option('--quiet', is_flag=True, default=False)
@@ -49,7 +39,7 @@ class Options:
 @click.pass_context
 def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
     """Laconic Stack Orchestrator"""
-    ctx.obj = Options(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
+    ctx.obj = CommandOptions(stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error)
 
 
 cli.add_command(setup_repositories.command, "setup-repositories")

@@ -6,3 +6,4 @@ click>=8.1.6
 PyYAML>=6.0.1
 ruamel.yaml>=0.17.32
 pydantic==1.10.9
+tomli==2.0.1

tests/laconic-network/run-test.sh (new executable file, +58)
@@ -0,0 +1,58 @@
+#!/bin/bash
+set -e
+if [ -n "$CERC_SCRIPT_DEBUG" ]; then
+  set -x
+fi
+
+node_count=4
+node_dir_prefix="laconic-network-dir"
+chain_id="laconic_81337-6"
+node_moniker_prefix="node"
+
+echo "Deleting any existing network directories..."
+for (( i=1 ; i<=$node_count ; i++ ));
+do
+  node_network_dir=${node_dir_prefix}${i}
+  if [[ -d $node_network_dir ]]; then
+    echo "Deleting ${node_network_dir}"
+    rm -rf ${node_network_dir}
+  fi
+done
+
+echo "Initalizing ${node_count} nodes networks..."
+for (( i=1 ; i<=$node_count ; i++ ));
+do
+  node_network_dir=${node_dir_prefix}${i}
+  node_moniker=${node_moniker_prefix}${i}
+  laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --initialize-network --chain-id ${chain_id} --node-moniker ${node_moniker}
+done
+
+echo "Joining ${node_count} nodes to the network..."
+for (( i=1 ; i<=$node_count ; i++ ));
+do
+  node_network_dir=${node_dir_prefix}${i}
+  node_moniker=${node_moniker_prefix}${i}
+  laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --join-network --key-name ${node_moniker}
+done
+
+echo "Merging ${node_count} nodes genesis txns..."
+gentx_files=""
+delimeter=""
+# Note: start at node 2 here because we're going to copy to node 1
+for (( i=2 ; i<=$node_count ; i++ ));
+do
+  node_network_dir=${node_dir_prefix}${i}
+  node_gentx_file=$(ls ${node_network_dir}/config/gentx/*.json)
+  gentx_files+=${delimeter}${node_gentx_file}
+  delimeter=","
+done
+# Generate the genesis file on node 1
+laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_dir_prefix}1 --create-network --gentx-files ${gentx_files}
+genesis_file=${node_dir_prefix}1/config/genesis.json
+# Now import the genesis file to the other nodes
+for (( i=2 ; i<=$node_count ; i++ ));
+do
+  echo "Importing genesis.json into node ${i}"
+  node_network_dir=${node_dir_prefix}${i}
+  laconic-so --stack mainnet-laconic deploy setup --network-dir ${node_network_dir} --create-network --genesis-file ${genesis_file}
+done