Merge main

David Boreham 2023-06-29 15:14:40 -06:00
commit 582b92d277
81 changed files with 1354 additions and 284 deletions

View File

@@ -15,7 +15,8 @@
 import os
 from abc import ABC, abstractmethod
-from .deploy_system import get_stack_status
+from .deploy import get_stack_status
+from decouple import config

 def get_stack(config, stack):
@@ -69,3 +70,10 @@ class package_registry_stack(base_stack):
     def get_url(self):
         return self.url
+
+def get_npm_registry_url():
+    # If an auth token is not defined, we assume the default should be the cerc registry
+    # If an auth token is defined, we assume the local gitea should be used.
+    default_npm_registry_url = "http://gitea.local:3000/api/packages/cerc-io/npm/" if config("CERC_NPM_AUTH_TOKEN", default=None) else "https://git.vdb.to/api/packages/cerc-io/npm/"
+    return config("CERC_NPM_REGISTRY_URL", default=default_npm_registry_url)
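In practice the resolution order introduced here is: an explicit `CERC_NPM_REGISTRY_URL` always wins, and the auth token only flips which default is used. A hypothetical build-time override (token value illustrative):

```bash
# Presence of an auth token alone flips the default registry to the local gitea
export CERC_NPM_AUTH_TOKEN=abc123
# An explicit registry URL takes precedence over either default
export CERC_NPM_REGISTRY_URL=http://gitea.local:3000/api/packages/cerc-io/npm/
```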

View File

@@ -28,6 +28,7 @@ import click
 import importlib.resources
 from pathlib import Path
 from .util import include_exclude_check, get_parsed_stack_config
+from .base import get_npm_registry_url

 # TODO: find a place for this
 # epilog="Config provided either in .env or settings.ini or env vars: CERC_REPO_BASE_DIR (defaults to ~/cerc)"
@@ -84,7 +85,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
     # TODO: make this configurable
     container_build_env = {
-        "CERC_NPM_REGISTRY_URL": config("CERC_NPM_REGISTRY_URL", default="http://gitea.local:3000/api/packages/cerc-io/npm/"),
+        "CERC_NPM_REGISTRY_URL": get_npm_registry_url(),
         "CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
         "CERC_REPO_BASE_DIR": dev_root_path,
         "CERC_CONTAINER_BASE_DIR": container_build_dir,

View File

@@ -22,7 +22,7 @@ services:
       - SYS_PTRACE
     environment:
       CERC_REMOTE_DEBUG: "true"
-      CERC_RUN_STATEDIFF: "detect"
+      CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
       CERC_STATEDIFF_DB_NODE_ID: 1
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
     env_file:
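The `${CERC_RUN_STATEDIFF:-detect}` form is standard compose interpolation: the deployer's environment supplies the value when set, otherwise `detect` is used. For example (hypothetical invocation, assuming the deploy command inherits the shell environment):

```bash
# Force statediffing on for one deployment without editing the compose file
CERC_RUN_STATEDIFF=true laconic-so --stack fixturenet-eth deploy up
```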

View File

@@ -25,3 +25,6 @@ services:
     image: cerc/laconic-registry-cli:local
     volumes:
       - ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
+
+volumes:
+  laconicd-data:

View File

@@ -29,7 +29,6 @@ services:
     image: cerc/fixturenet-plugeth-plugeth:local
     volumes:
       - fixturenet_plugeth_geth_1_data:/root/ethdata
-      - ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
     healthcheck:
       test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8545/"]
       interval: 30s
@@ -61,7 +60,6 @@ services:
       - fixturenet-eth-bootnode-geth
     volumes:
       - fixturenet_plugeth_geth_2_data:/root/ethdata
-      - ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
   fixturenet-eth-bootnode-lighthouse:
     restart: always

View File

@@ -7,11 +7,9 @@ services:
         condition: service_healthy
     image: cerc/ipld-eth-server:local
     environment:
-      IPLD_SERVER_GRAPHQL: "true"
-      IPLD_POSTGRAPHILEPATH: http://graphql:5000
-      ETH_SERVER_HTTPPATH: 0.0.0.0:8081
-      ETH_SERVER_GRAPHQL: "true"
-      ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
+      SERVER_HTTP_PATH: 0.0.0.0:8081
+      SERVER_GRAPHQL: "true"
+      SERVER_GRAPHQLPATH: 0.0.0.0:8082
       VDB_COMMAND: "serve"
       ETH_CHAIN_CONFIG: "/tmp/chain.json"
       DATABASE_NAME: cerc_testing

View File

@@ -0,0 +1,8 @@
version: "3.2"

services:
  lasso:
    image: cerc/lasso:local
    restart: always
    ports:
      - "0.0.0.0:3000:3000"

View File

@@ -0,0 +1,17 @@
version: "3.8"

services:
  go-opera:
    restart: unless-stopped
    image: cerc/go-opera:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/start-node.sh"]
    volumes:
      - ../config/mainnet-go-opera/start-node.sh:/docker-entrypoint-scripts.d/start-node.sh
    # TODO: ports taken from dockerfile, determine which are needed
    ports:
      - "5050:5050" # p2p port, needed
      - "5050:5050/udp"
      - "18545:18545" # http rpc port
      - "18546:18546" # websockets rpc port
      #- "18547" # unknown
      #- "19090" # unknown

View File

@@ -0,0 +1,30 @@
services:
  laconicd:
    restart: "no"
    image: cerc/laconicd:local
    command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
    volumes:
      # The cosmos-sdk node's database directory:
      - laconicd-data:/root/.laconicd/data
      # TODO: look at folding these scripts into the container
      - ../config/mainnet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
      - ../config/mainnet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
      - ../config/mainnet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
    # TODO: determine which of the ports below is really needed
    ports:
      - "6060"
      - "26657"
      - "26656"
      - "9473:9473"
      - "8545"
      - "8546"
      - "9090"
      - "9091"
      - "1317"
  cli:
    image: cerc/laconic-registry-cli:local
    volumes:
      - ../config/mainnet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml

volumes:
  laconicd-data:
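Note that short-form entries such as `- "26657"` publish the container port on an ephemeral host port chosen by Docker; only `9473:9473` is pinned. A sketch of checking what was assigned (the container name will vary):

```bash
# Print the host port Docker picked for laconicd's port 26657
docker port $(docker ps -q --filter "name=laconicd") 26657
```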

View File

@@ -23,7 +23,7 @@ services:
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "0.0.0.0:3002:80"
+      - "127.0.0.1:3002:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s
@@ -55,7 +55,7 @@ services:
       - peers_ids:/peers
       - mobymask_deployment:/server
     ports:
-      - "0.0.0.0:3004:80"
+      - "127.0.0.1:3004:80"
     healthcheck:
       test: ["CMD", "nc", "-vz", "localhost", "80"]
       interval: 20s

View File

@@ -1,8 +1,9 @@
 version: '3.2'
 services:
+  # Builds and serves the peer-test react-app
   peer-test-app:
-    # Builds and serves the peer-test react-app
+    restart: unless-stopped
     image: cerc/react-peer:local
     working_dir: /scripts
     env_file:
@@ -17,7 +18,7 @@ services:
       - ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
       - peers_ids:/peers
     ports:
-      - "0.0.0.0:3003:80"
+      - "127.0.0.1:3003:80"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "80"]
       interval: 20s

View File

@@ -0,0 +1,35 @@
version: "3.8"

services:
  reth:
    restart: unless-stopped
    hostname: reth
    image: cerc/reth:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/start-reth.sh"]
    volumes:
      - ../config/reth/start-reth.sh:/docker-entrypoint-scripts.d/start-reth.sh
      - reth_data:/root/.local/share/reth
      - shared_data:/root/.shared_data
    ports:
      - "8545:8545" # http rpc
      - "8546:8546" # ws rpc
      - "30303:30303" # network listening port
      - "30303:30303/udp"
      - "8551" # consensus auth

  lighthouse:
    restart: unless-stopped
    hostname: lighthouse
    image: cerc/lighthouse:local
    entrypoint: ["sh", "/docker-entrypoint-scripts.d/start-lighthouse.sh"]
    volumes:
      - ../config/reth/start-lighthouse.sh:/docker-entrypoint-scripts.d/start-lighthouse.sh
      - lighthouse_data:/root/.lighthouse/mainnet
      - shared_data:/root/.shared_data
    ports:
      - "8001"

volumes:
  reth_data:
  lighthouse_data:
  shared_data:

View File

@@ -5,7 +5,7 @@ services:
     environment:
       CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
     volumes:
-      - test-data:/var
+      - test-data:/data
     ports:
       - "80"

View File

@@ -14,7 +14,7 @@ services:
       - ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
       - mobymask_watcher_db_data:/var/lib/postgresql/data
     ports:
-      - "0.0.0.0:15432:5432"
+      - "127.0.0.1:15432:5432"
     healthcheck:
       test: ["CMD", "nc", "-v", "localhost", "5432"]
       interval: 20s
@@ -95,9 +95,9 @@ services:
       - mobymask_deployment:/server
     # Expose GQL, metrics and relay node ports
     ports:
-      - "0.0.0.0:3001:3001"
-      - "0.0.0.0:9001:9001"
-      - "0.0.0.0:9090:9090"
+      - "127.0.0.1:3001:3001"
+      - "127.0.0.1:9001:9001"
+      - "127.0.0.1:9090:9090"
     healthcheck:
       test: ["CMD", "busybox", "nc", "localhost", "9090"]
       interval: 20s

View File

@@ -17,7 +17,8 @@ CERC_STATEDIFF_DB_PORT=5432
 CERC_STATEDIFF_DB_NAME="cerc_testing"
 CERC_STATEDIFF_DB_USER="vdbm"
 CERC_STATEDIFF_DB_PASSWORD="password"
-CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
+CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18}
 CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
+CERC_STATEDIFF_WORKERS=2
 CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"

View File

@@ -0,0 +1 @@

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# download genesis file
wget https://download.fantom.network/mainnet-109331-no-history.g
./opera --genesis=mainnet-109331-no-history.g --db.preset ldb-1 --syncmode snap --http --http.addr="0.0.0.0" --http.corsdomain="*" --http.api=eth,web3,net,txpool,ftm --ws --ws.addr="0.0.0.0" --ws.origins="*" --ws.api=eth,web3,net,txpool,ftm --cache 8192
#tail -f /dev/null
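Once the container is up (host ports mapped as in the compose file above), the HTTP RPC endpoint can be smoke-tested from the host with a standard eth API call, e.g.:

```bash
# Query the opera node's JSON-RPC endpoint on the mapped host port 18545
curl -s -X POST -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' \
  http://localhost:18545
```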

View File

@@ -0,0 +1,118 @@
#!/bin/bash
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
TRACE="--trace"
# TRACE=""
# validate dependencies are installed
command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
# remove existing daemon and client
rm -rf ~/.laconic*
make install
laconicd config keyring-backend $KEYRING
laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["time_iota_ms"]="1000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Set gas limit in genesis
cat $HOME/.laconicd/config/genesis.json | jq '.consensus_params["block"]["max_gas"]="10000000"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# disable produce empty block
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks = true/create_empty_blocks = false/g' $HOME/.laconicd/config/config.toml
fi
if [[ $1 == "pending" ]]; then
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i '' 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
else
sed -i 's/create_empty_blocks_interval = "0s"/create_empty_blocks_interval = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose = "3s"/timeout_propose = "30s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_propose_delta = "500ms"/timeout_propose_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote = "1s"/timeout_prevote = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_prevote_delta = "500ms"/timeout_prevote_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit = "1s"/timeout_precommit = "10s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_precommit_delta = "500ms"/timeout_precommit_delta = "5s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_commit = "5s"/timeout_commit = "150s"/g' $HOME/.laconicd/config/config.toml
sed -i 's/timeout_broadcast_tx_commit = "10s"/timeout_broadcast_tx_commit = "150s"/g' $HOME/.laconicd/config/config.toml
fi
fi
# Allocate genesis accounts (cosmos formatted addresses)
laconicd add-genesis-account $KEY 100000000000000000000000000aphoton --keyring-backend $KEYRING
# Sign genesis transaction
laconicd gentx $KEY 1000000000000000000000aphoton --keyring-backend $KEYRING --chain-id $CHAINID
# Collect genesis tx
laconicd collect-gentxs
# Run this to ensure everything worked and that the genesis file is setup correctly
laconicd validate-genesis
if [[ $1 == "pending" ]]; then
echo "pending mode is on, please wait for the first block committed."
fi
# Start the node (remove the --pruning=nothing flag if historical queries are not needed)
laconicd start --pruning=nothing --evm.tracer=json $TRACE --log_level $LOGLEVEL --minimum-gas-prices=0.0001aphoton --json-rpc.api eth,txpool,personal,net,debug,web3,miner --api.enable --gql-server --gql-playground
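The script repeats the same `cat | jq > tmp && mv` sequence for every genesis edit; a possible cleanup (not part of this commit) would factor it into a helper:

```bash
# Hypothetical helper: apply one jq edit to genesis.json in place
update_genesis() {
    local genesis=$HOME/.laconicd/config/genesis.json
    cat $genesis | jq "$1" > $genesis.tmp && mv $genesis.tmp $genesis
}
update_genesis '.app_state["staking"]["params"]["bond_denom"]="aphoton"'
update_genesis '.app_state["mint"]["params"]["mint_denom"]="aphoton"'
```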

View File

@@ -0,0 +1,2 @@
#!/bin/sh
laconicd keys show mykey | grep address | cut -d ' ' -f 3

View File

@@ -0,0 +1,2 @@
#!/bin/sh
echo y | laconicd keys export mykey --unarmored-hex --unsafe

View File

@@ -0,0 +1,9 @@
services:
  cns:
    restEndpoint: 'http://laconicd:1317'
    gqlEndpoint: 'http://laconicd:9473/api'
    userKey: REPLACE_WITH_MYKEY
    bondId:
    chainId: laconic_9000-1
    gas: 250000
    fees: 200000aphoton

View File

@@ -0,0 +1,16 @@
#!/bin/bash

# Wait for reth container to create jwt auth token
while [ ! -f /root/.shared_data/jwt.hex ]; do
    echo "Jwt auth token not found, sleeping for 5s..."
    sleep 5
done
echo "Jwt token found. Starting Lighthouse..."

export RUST_LOG=info

lighthouse bn \
  --network mainnet \
  --execution-endpoint http://reth:8551 \
  --execution-jwt /root/.shared_data/jwt.hex \
  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
  --disable-deposit-contract-sync
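This wait loop is the consumer side of a simple file-based handshake: both containers mount the same `shared_data` volume, and the reth entrypoint (next file) writes the secret with `tee /root/.shared_data/jwt.hex`, so polling for the file is enough to order startup without any extra coordination service.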

View File

@@ -0,0 +1,24 @@
#!/bin/bash

# generate jwt token for reth/lighthouse authentication
echo "Installing OpenSSL..."
apt update
apt install -y openssl
echo "Generating jwt token for lighthouse auth..."
openssl rand -hex 32 | tr -d "\n" | tee /root/.shared_data/jwt.hex

# start reth
echo "Starting Reth..."
export RUST_LOG=info
reth node \
  --authrpc.jwtsecret /root/.shared_data/jwt.hex \
  --authrpc.addr 0.0.0.0 \
  --authrpc.port 8551 \
  --http \
  --http.addr 0.0.0.0 \
  --http.corsdomain "*" \
  --http.api eth,web3,net,rpc \
  --ws \
  --ws.addr 0.0.0.0 \
  --ws.origins "*" \
  --ws.api eth,web3,net,rpc

View File

@@ -10,7 +10,7 @@ DEFAULT_CERC_RELAY_PEERS=[]
 DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN=

 # Base URI for mobymask-app (used for generating invite)
-DEFAULT_CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#"
+DEFAULT_CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3004/#"

 # Set to false for disabling watcher peer to send txs to L2
 DEFAULT_CERC_ENABLE_PEER_L2_TXS=true

View File

@@ -22,6 +22,18 @@ COPY run-el.sh /opt/testnet/run.sh
 RUN cd /opt/testnet && make genesis-el

 COPY --from=geth /usr/local/bin/geth /usr/local/bin/

+# Snag the genesis block info.
 RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey
+RUN cp -rp ~/ethdata ~/tmpeth && \
+    geth --datadir ~/tmpeth init /opt/testnet/build/el/geth.json && \
+    geth --datadir ~/tmpeth --http & \
+    sleep 5 && \
+    curl -q --location 'localhost:8545' \
+      --header 'Content-Type: application/json' \
+      --data '{ "jsonrpc": "2.0", "id": 14, "method": "eth_getBlockByNumber", "params": ["0x0", false] }' \
+      -o /opt/testnet/build/el/genesis_block.json && \
+    killall -9 geth && \
+    rm -rf ~/tmpeth

 ENTRYPOINT ["/opt/testnet/run.sh"]
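The temporary-datadir trick above exists only to capture the genesis block: a throwaway geth serves one `eth_getBlockByNumber` call whose response is saved for the CL genesis step, which later extracts `.result.hash` (see the `vars.env` change below). Roughly, with a placeholder hash value:

```bash
# Request, as issued in the Dockerfile:
curl -s localhost:8545 -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":14,"method":"eth_getBlockByNumber","params":["0x0",false]}'
# Response shape: {"jsonrpc":"2.0","id":14,"result":{"number":"0x0","hash":"0x1234...", ...}}
# vars.env derives ETH1_BLOCK_HASH from it with: jq -r '.result.hash' | cut -d'x' -f2
```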

View File

@@ -34,5 +34,7 @@ python3 /apps/el-gen/genesis_geth.py $tmp_dir/genesis-config.yaml | \
   jq ".config.istanbulBlock=$istanbul_block" | \
   jq ".config.berlinBlock=$berlin_block" | \
   jq ".config.londonBlock=$london_block" | \
-  jq ".config.mergeForkBlock=$merge_fork_block" > ../build/el/geth.json
+  jq ".config.mergeForkBlock=$merge_fork_block" | \
+  jq ".config.mergeNetsplitBlock=$merge_fork_block" \
+  > ../build/el/geth.json

 python3 ../accounts/mnemonic_to_csv.py $tmp_dir/genesis-config.yaml > ../build/el/accounts.csv

View File

@@ -7,11 +7,12 @@ fi
 ETHERBASE=`cat /opt/testnet/build/el/accounts.csv | head -1 | cut -d',' -f2`
 NETWORK_ID=`cat /opt/testnet/el/el-config.yaml | grep 'chain_id' | awk '{ print $2 }'`
 NETRESTRICT=`ip addr | grep inet | grep -v '127.0' | awk '{print $2}'`
+CERC_ETH_DATADIR="${CERC_ETH_DATADIR:-$HOME/ethdata}"
+CERC_PLUGINS_DIR="${CERC_PLUGINS_DIR:-/usr/local/lib/plugeth}"
-HOME_DIR=`pwd`

 cd /opt/testnet/build/el
 python3 -m http.server 9898 &
-cd $HOME_DIR
+cd $HOME

 START_CMD="geth"
 if [ "true" == "$CERC_REMOTE_DEBUG" ] && [ -x "/usr/local/bin/dlv" ]; then
@@ -34,7 +35,7 @@ trap 'cleanup' SIGINT SIGTERM
 if [ "true" == "$RUN_BOOTNODE" ]; then
   $START_CMD \
-    --datadir=~/ethdata \
+    --datadir="${CERC_ETH_DATADIR}" \
     --nodekeyhex="${BOOTNODE_KEY}" \
     --nodiscover \
     --ipcdisable \
@@ -64,8 +65,8 @@ else
   STATEDIFF_OPTS=""
   if [ "$CERC_RUN_STATEDIFF" == "true" ]; then
     ready=0
+    echo "Waiting for statediff DB..."
     while [ $ready -eq 0 ]; do
-      echo "Waiting for statediff DB..."
       sleep 1
       export PGPASSWORD="$CERC_STATEDIFF_DB_PASSWORD"
       result=$(psql -h "$CERC_STATEDIFF_DB_HOST" \
@@ -73,12 +74,16 @@ else
         -U "$CERC_STATEDIFF_DB_USER" \
         -d "$CERC_STATEDIFF_DB_NAME" \
         -t -c 'select max(version_id) from goose_db_version;' 2>/dev/null | awk '{ print $1 }')
-      if [ -n "$result" ] && [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
+      if [ -n "$result" ]; then
         echo "DB ready..."
-        ready=1
+        if [ $result -ge $CERC_STATEDIFF_DB_GOOSE_MIN_VER ]; then
+          ready=1
+        else
+          echo "DB not at required version (want $CERC_STATEDIFF_DB_GOOSE_MIN_VER, have $result)"
+        fi
       fi
     done

-    STATEDIFF_OPTS="--statediff=true \
+    STATEDIFF_OPTS="--statediff \
     --statediff.db.host=$CERC_STATEDIFF_DB_HOST \
     --statediff.db.name=$CERC_STATEDIFF_DB_NAME \
     --statediff.db.nodeid=$CERC_STATEDIFF_DB_NODE_ID \
@@ -88,11 +93,17 @@ else
     --statediff.db.logstatements=${CERC_STATEDIFF_DB_LOG_STATEMENTS:-false} \
     --statediff.db.copyfrom=${CERC_STATEDIFF_DB_COPY_FROM:-true} \
     --statediff.waitforsync=true \
+    --statediff.workers=${CERC_STATEDIFF_WORKERS:-1} \
     --statediff.writing=true"
+
+    if [ -d "${CERC_PLUGINS_DIR}" ]; then
+      # With plugeth, we separate the statediff options by prefixing with ' -- '
+      STATEDIFF_OPTS="--pluginsdir "${CERC_PLUGINS_DIR}" -- ${STATEDIFF_OPTS}"
+    fi
   fi

   $START_CMD \
-    --datadir=~/ethdata \
+    --datadir="${CERC_ETH_DATADIR}" \
     --bootnodes="${ENODE}" \
     --allow-insecure-unlock \
     --http \
@@ -119,8 +130,9 @@ else
     --metrics \
     --metrics.addr="0.0.0.0" \
     --verbosity=${CERC_GETH_VERBOSITY:-3} \
-    --vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
-    --miner.etherbase="${ETHERBASE}" ${STATEDIFF_OPTS} \
+    --log.vmodule="${CERC_GETH_VMODULE:-statediff/*=5}" \
+    --miner.etherbase="${ETHERBASE}" \
+    ${STATEDIFF_OPTS} \
     &

   geth_pid=$!
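As the comment in the script notes, with plugeth the ` -- ` separator hands everything after it to the plugin loader rather than to geth's own flag parser, so when the plugins directory exists the assembled command looks roughly like this (flags abbreviated, not a literal transcript):

```bash
geth --datadir "$CERC_ETH_DATADIR" --bootnodes "$ENODE" ... \
  --pluginsdir /usr/local/lib/plugeth -- \
  --statediff --statediff.db.host=$CERC_STATEDIFF_DB_HOST --statediff.writing=true
```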

View File

@@ -1,4 +1,4 @@
-FROM sigp/lcli:v4.1.0 AS lcli
+FROM cerc/lighthouse-cli:local AS lcli
 FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
 FROM cerc/fixturenet-eth-geth:local AS fnetgeth

View File

@@ -27,12 +27,14 @@ lcli \
   --deposit-contract-address $ETH1_DEPOSIT_CONTRACT_ADDRESS \
   --testnet-dir $TESTNET_DIR \
   --min-genesis-active-validator-count $GENESIS_VALIDATOR_COUNT \
+  --validator-count $VALIDATOR_COUNT \
   --min-genesis-time $GENESIS_TIME \
   --genesis-delay $GENESIS_DELAY \
   --genesis-fork-version $GENESIS_FORK_VERSION \
   --altair-fork-epoch $ALTAIR_FORK_EPOCH \
-  --merge-fork-epoch $MERGE_FORK_EPOCH \
+  --bellatrix-fork-epoch $MERGE_FORK_EPOCH \
   --eth1-id $ETH1_CHAIN_ID \
+  --eth1-block-hash $ETH1_BLOCK_HASH \
   --eth1-follow-distance 1 \
   --seconds-per-slot $SECONDS_PER_SLOT \
   --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \

View File

@@ -15,9 +15,6 @@ GENESIS_VALIDATOR_COUNT=${GENESIS_VALIDATOR_COUNT:-80}
 # Number of beacon_node instances that you intend to run
 BN_COUNT=${BN_COUNT:-2}

-# Number of validator clients
-VC_COUNT=${VC_COUNT:-$BN_COUNT}
-
 # Number of seconds to delay to start genesis block.
 # If started by a script this can be 0, if starting by hand
 # use something like 180.
@@ -45,7 +42,9 @@ VC_ARGS=${VC_ARGS:-""}
 EXECUTION_ENDPOINT=${EXECUTION_ENDPOINT:-http://localhost:8551}

 ETH1_GENESIS_JSON=${ETH1_GENESIS_JSON:-"../build/el/geth.json"}
+ETH1_GENESIS_BLOCK_JSON=${ETH1_GENESIS_BLOCK_JSON:-"../build/el/genesis_block.json"}
 ETH1_CONFIG_YAML=${ETH1_CONFIG_YAML:-"../el/el-config.yaml"}
+ETH1_BLOCK_HASH=${ETH1_BLOCK_HASH:-`cat $ETH1_GENESIS_BLOCK_JSON | jq -r '.result.hash' | cut -d'x' -f2`}
 ETH1_CHAIN_ID=${ETH1_CHAIN_ID:-`cat $ETH1_GENESIS_JSON | jq -r '.config.chainId'`}
 ETH1_TTD=${ETH1_TTD:-`cat $ETH1_GENESIS_JSON | jq -r '.config.terminalTotalDifficulty'`}

View File

@@ -4,7 +4,14 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
 fi

 MIN_BLOCK_NUM=${1:-${MIN_BLOCK_NUM:-3}}
-STATUSES=("geth to generate DAG" "beacon phase0" "beacon altair" "beacon bellatrix pre-merge" "beacon bellatrix merge" "block number $MIN_BLOCK_NUM")
+STATUSES=(
+  "geth to generate DAG"
+  "beacon phase0"
+  "beacon altair"
+  "beacon bellatrix pre-merge"
+  "beacon bellatrix merge"
+  "block number $MIN_BLOCK_NUM"
+)
 STATUS=0

 LIGHTHOUSE_BASE_URL=${LIGHTHOUSE_BASE_URL}
@@ -36,7 +43,6 @@ MARKER="."

 function inc_status() {
   echo " done"
-  MARKEr="."
   STATUS=$((STATUS + 1))
   if [ $STATUS -lt ${#STATUSES[@]} ]; then
     echo -n "Waiting for ${STATUSES[$STATUS]}..."

View File

@ -1,27 +0,0 @@
FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
FROM golang:1.19.4-bullseye AS delve
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM ubuntu:22.04
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 python3-dev python3-pip curl wget jq gettext gettext-base openssl bash dnsutils postgresql-client make iproute2 netcat && \
rm -rf /var/lib/apt/lists/*
COPY --from=delve /go/bin/dlv /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
COPY --from=ethgen /apps /apps
RUN wget -O /usr/local/bin/geth https://github.com/openrelayxyz/plugeth/releases/download/v1.11.6.1.0/geth-linux-amd64-v1.1.0-v1.11.6.1.0 && chmod a+x /usr/local/bin/geth
RUN cd /apps/el-gen && pip3 install -r requirements.txt
COPY genesis /opt/testnet
COPY run-el.sh /opt/testnet/run.sh
RUN cd /opt/testnet && make genesis-el
RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey
ENTRYPOINT ["/opt/testnet/run.sh"]

View File

@ -1,17 +0,0 @@
#!/usr/bin/env bash
# Build cerc/fixturenet-eth-plugeth
set -x
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
if [ ! -d "${SCRIPT_DIR}/genesis" ]; then
cp -frp ${SCRIPT_DIR}/../cerc-fixturenet-eth-geth/genesis ${SCRIPT_DIR}/genesis
fi
if [ ! -d "${SCRIPT_DIR}/run-el.sh" ]; then
cp -fp ${SCRIPT_DIR}/../cerc-fixturenet-eth-geth/run-el.sh ${SCRIPT_DIR}/
fi
docker build -t cerc/fixturenet-eth-plugeth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR

View File

@@ -1,4 +1,4 @@
-FROM sigp/lcli:v4.1.0 AS lcli
+FROM cerc/lighthouse-cli:local AS lcli
 FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
 FROM cerc/fixturenet-plugeth-plugeth:local AS fnetgeth

View File

@@ -1,20 +1,19 @@
 FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
-FROM golang:1.19.4-bullseye AS delve
+FROM golang:1.19-alpine as delve
 RUN go install github.com/go-delve/delve/cmd/dlv@latest
-FROM ubuntu:22.04
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    python3 python3-dev python3-pip curl wget jq gettext gettext-base openssl bash dnsutils postgresql-client make iproute2 netcat && \
-    rm -rf /var/lib/apt/lists/*
+FROM cerc/plugeth:local as geth
+FROM cerc/plugeth-statediff:local as statediff
+
+FROM alpine:3.17
+RUN apk add --no-cache python3 python3-dev py3-pip curl wget jq build-base gettext libintl openssl bash bind-tools postgresql-client

 COPY --from=delve /go/bin/dlv /usr/local/bin/
 COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
 COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
 COPY --from=ethgen /apps /apps
-RUN wget -O /usr/local/bin/geth https://github.com/openrelayxyz/plugeth/releases/download/v1.11.6.1.0/geth-linux-amd64-v1.1.0-v1.11.6.1.0 && chmod a+x /usr/local/bin/geth

 RUN cd /apps/el-gen && pip3 install -r requirements.txt

 COPY genesis /opt/testnet
@@ -22,6 +21,22 @@ COPY run-el.sh /opt/testnet/run.sh
 RUN cd /opt/testnet && make genesis-el

+COPY --from=geth /usr/local/bin/geth /usr/local/bin/
+RUN mkdir -p /usr/local/lib/plugeth/
+COPY --from=statediff /usr/local/lib/statediff.so /usr/local/lib/plugeth/
+
+# Snag the genesis block info.
 RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey
+RUN cp -rp ~/ethdata ~/tmpeth && \
+    geth --datadir ~/tmpeth init /opt/testnet/build/el/geth.json && \
+    geth --datadir ~/tmpeth --http & \
+    sleep 5 && \
+    curl -q --location 'localhost:8545' \
+      --header 'Content-Type: application/json' \
+      --data '{ "jsonrpc": "2.0", "id": 14, "method": "eth_getBlockByNumber", "params": ["0x0", false] }' \
+      -o /opt/testnet/build/el/genesis_block.json && \
+    killall -9 geth && \
+    rm -rf ~/tmpeth

 ENTRYPOINT ["/opt/testnet/run.sh"]

View File

@@ -16,7 +16,7 @@ db-waitforsync=bool Should the statediff service start once geth has synced to
 rpc-port=port       change RPC port (default: 8545)
 rpc-addr=address    change RPC address (default: 127.0.0.1)
 chain-id=number     change chain ID (default: 99)
 extra-args=name     extra args to pass to geth on startup
 period=seconds      use a block time instead of instamine
 accounts=number     create multiple accounts (default: 1)
 address=address     eth address to add to genesis

View File

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Build cerc/go-opera
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# Repo's dockerfile gives build error because it's hardcoded for go 1.17; go 1.19 is required
sed -i 's/FROM golang:1\.[0-9]*-alpine as builder/FROM golang:1.19-alpine as builder/' ${CERC_REPO_BASE_DIR}/go-opera/docker/Dockerfile.opera
docker build -f ${CERC_REPO_BASE_DIR}/go-opera/docker/Dockerfile.opera -t cerc/go-opera:local ${build_command_args} ${CERC_REPO_BASE_DIR}/go-opera

View File

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Build the lasso image
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/lasso:local -f ${CERC_REPO_BASE_DIR}/lasso/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/lasso

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Build cerc/lighthouse-cli
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
project_dir=${CERC_REPO_BASE_DIR}/lighthouse
docker build -t cerc/lighthouse-cli:local --build-arg PORTABLE=true -f ${project_dir}/lcli/Dockerfile ${build_command_args} ${project_dir}

View File

@@ -1,4 +1,5 @@
-FROM sigp/lighthouse:v4.1.0-modern
+ARG TAG_SUFFIX="-modern"
+FROM sigp/lighthouse:v4.1.0${TAG_SUFFIX}

 RUN apt-get update; apt-get install bash netcat curl less jq -y;

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Build cerc/reth
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/reth:local ${build_command_args} ${CERC_REPO_BASE_DIR}/reth

View File

@@ -4,7 +4,7 @@ if [ -n "$CERC_SCRIPT_DEBUG" ]; then
     set -x
 fi

 # Test if the container's filesystem is old (run previously) or new
-EXISTSFILENAME=/var/exists
+EXISTSFILENAME=/data/exists
 echo "Test container starting"
 if [[ -f "$EXISTSFILENAME" ]];
 then

View File

@@ -41,3 +41,6 @@ cerc/watcher-azimuth
 cerc/ipld-eth-state-snapshot
 cerc/watcher-gelato
 cerc/lotus
+cerc/go-opera
+cerc/lasso
+cerc/reth

View File

@@ -5,7 +5,6 @@ go-ethereum-foundry
 ipld-eth-beacon-db
 ipld-eth-beacon-indexer
 ipld-eth-server
-lighthouse
 laconicd
 fixturenet-laconicd
 fixturenet-eth
@@ -28,3 +27,6 @@ fixturenet-pocket
 watcher-azimuth
 watcher-gelato
 fixturenet-lotus
+mainnet-go-opera
+lasso
+reth

View File

@@ -34,3 +34,6 @@ github.com/cerc-io/ipld-eth-state-snapshot
 github.com/cerc-io/gelato-watcher-ts
 github.com/filecoin-project/lotus
 git.vdb.to/cerc-io/test-project
+github.com/Fantom-foundation/go-opera
+github.com/cerc-io/lasso
+github.com/paradigmxyz/reth

View File

@@ -6,9 +6,11 @@ repos:
   - github.com/cerc-io/tx-spammer
   - github.com/cerc-io/ipld-eth-server
   - github.com/cerc-io/ipld-eth-db
-  - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/lighthouse
 containers:
+  - cerc/go-ethereum
   - cerc/lighthouse
+  - cerc/lighthouse-cli
   - cerc/fixturenet-eth-geth
   - cerc/fixturenet-eth-lighthouse
   - cerc/ipld-eth-server

View File

@@ -4,10 +4,12 @@ decription: "Ethereum Fixturenet w/ tx-spammer"
 repos:
   - github.com/cerc-io/go-ethereum
   - github.com/cerc-io/tx-spammer
-  - dboreham/foundry
+  - github.com/dboreham/foundry
+  - github.com/cerc-io/lighthouse
 containers:
   - cerc/go-ethereum
   - cerc/lighthouse
+  - cerc/lighthouse-cli
   - cerc/fixturenet-eth-geth
   - cerc/fixturenet-eth-lighthouse
   - cerc/tx-spammer

View File

@@ -1,6 +1,6 @@
 # fixturenet-eth

-Instructions for deploying a local geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator#user-mode)):
+Instructions for deploying a local geth + lighthouse blockchain "fixturenet" for development and testing purposes using laconic-stack-orchestrator (the installation of which is covered [here](https://github.com/cerc-io/stack-orchestrator)):

 ## Clone required repositories

View File

@@ -3,10 +3,12 @@ name: fixturenet-eth
 decription: "Ethereum Fixturenet"
 repos:
   - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/lighthouse
   - github.com/dboreham/foundry
 containers:
   - cerc/go-ethereum
   - cerc/lighthouse
+  - cerc/lighthouse-cli
   - cerc/fixturenet-eth-geth
   - cerc/fixturenet-eth-lighthouse
   - cerc/foundry

View File

@@ -14,14 +14,6 @@ laconic-so --stack fixturenet-optimism setup-repositories
 # If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
 ```

-Checkout to the required versions and branches in repos:
-```bash
-# Optimism
-cd ~/cerc/optimism
-git checkout v1.0.4
-```
-
 Build the container images:
 ```bash
@@ -50,12 +42,11 @@ Deploy the stack:
 laconic-so --stack fixturenet-optimism deploy up
 ```

-If you get the error `service "fixturenet-optimism-contracts" didn't complete successfully: exit 1` with ~25 lines of Traceback, wait 15-20 mins then re-run the command.
 The `fixturenet-optimism-contracts` service takes a while to complete running as it:
 1. waits for the 'Merge' to happen on L1
 2. waits for a finalized block to exist on L1 (so that it can be taken as a starting block for roll ups)
 3. deploys the L1 contracts
+It may restart a few times after running into errors.

 To list and monitor the running containers:
@@ -123,6 +114,5 @@ docker volume rm $(docker volume ls -q --filter "name=.*l1_deployment|.*l2_accou
 ## Known Issues

-* `fixturenet-eth` currently starts fresh on a restart
 * Resource requirements (memory + time) for building the `cerc/foundry` image are on the higher side
 * `cerc/optimism-contracts` image is currently based on `cerc/foundry` (Optimism requires foundry installation)

View File

@@ -14,14 +14,6 @@ laconic-so --stack fixturenet-optimism setup-repositories --exclude github.com/c
 # If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
 ```

-Checkout to the required versions and branches in repos:
-```bash
-# Optimism
-cd ~/cerc/optimism
-git checkout v1.0.4
-```
-
 Build the container images:
 ```bash

View File

@@ -3,12 +3,14 @@ name: fixturenet-optimism
 decription: "Optimism Fixturenet"
 repos:
   - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/lighthouse
   - github.com/dboreham/foundry
-  - github.com/ethereum-optimism/optimism
-  - github.com/ethereum-optimism/op-geth
+  - github.com/ethereum-optimism/optimism@v1.0.4
+  - github.com/ethereum-optimism/op-geth@v1.101105.2
 containers:
   - cerc/go-ethereum
   - cerc/lighthouse
+  - cerc/lighthouse-cli
   - cerc/fixturenet-eth-geth
   - cerc/fixturenet-eth-lighthouse
   - cerc/foundry

View File

@@ -3,14 +3,26 @@ name: fixturenet-plugeth-tx
 decription: "plugeth Ethereum Fixturenet w/ tx-spammer"
 repos:
   - github.com/cerc-io/tx-spammer
-  - dboreham/foundry
+  - github.com/dboreham/foundry
+  - github.com/cerc-io/lighthouse
+  - github.com/cerc-io/ipld-eth-db@v5
+  - github.com/cerc-io/ipld-eth-server@v5
+  - git.vdb.to/cerc-io/plugeth@statediff-wip
+  - git.vdb.to/cerc-io/plugeth-statediff@dev-local-build
 containers:
   - cerc/lighthouse
+  - cerc/lighthouse-cli
+  - cerc/plugeth-statediff
+  - cerc/plugeth
   - cerc/fixturenet-plugeth-plugeth
   - cerc/fixturenet-plugeth-lighthouse
   - cerc/tx-spammer
   - cerc/foundry
+  - cerc/ipld-eth-db
+  - cerc/ipld-eth-server
 pods:
+  - ipld-eth-db
+  - ipld-eth-server
   - fixturenet-plugeth
   - foundry
   - tx-spammer

View File

@@ -3,11 +3,13 @@ name: fixturenet-pocket
 description: "A single node pocket chain that can serve relays from the geth-1 node in eth-fixturenet"
 repos:
   - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/lighthouse
   - github.com/pokt-network/pocket-core
   - github.com/pokt-network/pocket-core-deployments # contains the dockerfile
 containers:
   - cerc/go-ethereum
   - cerc/lighthouse
+  - cerc/lighthouse-cli
   - cerc/fixturenet-eth-geth
   - cerc/fixturenet-eth-lighthouse
   - cerc/pocket

View File

@@ -0,0 +1,7 @@
# lasso
```
laconic-so --stack lasso setup-repositories
laconic-so --stack lasso build-containers
laconic-so --stack lasso deploy up
```

View File

@@ -0,0 +1,8 @@
version: "0.1"
name: lasso
repos:
- github.com/cerc-io/lasso
containers:
- cerc/lasso
pods:
- lasso

View File

@@ -0,0 +1,88 @@
# Opera (Fantom)
Deploy a Fantom API node.
## Clone required repositories
```
$ laconic-so --stack mainnet-go-opera setup-repositories
```
## Build the containers
```
$ laconic-so --stack mainnet-go-opera build-containers
```
## Deploy the stack
```
$ laconic-so --stack mainnet-go-opera deploy up
```
## Check logs
```
$ laconic-so --stack mainnet-go-opera deploy logs
```
You'll see something like:
```
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | Connecting to download.fantom.network (65.108.45.88:443)
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | saving to 'mainnet-109331-no-history.g'
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | mainnet-109331-no-hi 100% |********************************| 16326 0:00:00 ETA
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | 'mainnet-109331-no-history.g' saved
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Maximum peer count total=50
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Smartcard socket not found, disabling err="stat /run/pcscd/pcscd.comm: no such file or directory"
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.034] Genesis file is a known preset name="Mainnet-109331 without history"
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] Applying genesis state
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.052] - Reading epochs unit 0
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.054] - Reading blocks unit 0
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.530] Applied genesis state name=main id=250 genesis=0x4a53c5445584b3bfc20dbfb2ec18ae20037c716f3ba2d9e1da768a9deca17cb4
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.531] Regenerated local transaction journal transactions=0 accounts=0
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.532] Starting peer-to-peer node instance=go-opera/v1.1.2-rc.5-50cd051d-1677276206/linux-amd64/go1.19.10
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.536] New local node record seq=1 id=5e40f984908317cd ip=127.0.0.1 udp=5050 tcp=5050
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.537] Started P2P networking self=enode://3ffb15988ca5a79b63dbe48be89d9d8b48dc4845d318fe08231a0ab49d3b23476e2561044311dc257405f882f7c52ff7b128c8bd1b6d85cf7205a6fed6555443@127.0.0.1:5050
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.537] IPC endpoint opened url=/root/.opera/opera.ipc
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] HTTP server started endpoint=[::]:18545 prefix= cors=* vhosts=localhost
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] WebSocket enabled url=ws://[::]:18546
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Rebuilding state snapshot
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] EVM snapshot module=gossip-store at=000000..000000 generating=true
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Resuming state snapshot generation accounts=0 slots=0 storage=0.00B elapsed="189.74µs"
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:33.538] Generated state snapshot accounts=0 slots=0 storage=0.00B elapsed="265.061µs"
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:34.788] New LLR summary last_epoch=0 last_block=37676611 new_evs=0 new_ers=0 new_bvs=64 new_brs=0 age=none
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:35.040] New local node record seq=2 id=5e40f984908317cd ip=186.233.184.56 udp=5050 tcp=5050
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:42.788] New LLR summary last_epoch=114604 last_block=37753891 new_evs=24581 new_ers=5272 new_bvs=233257 new_brs=780 age=1y1mo5d
laconic-f028f14527b95e2eb97f0c0229d00939-go-opera-1 | INFO [06-20|13:32:50.827] New LLR summary last_epoch=115574 last_block=38118749 new_evs=4907 new_ers=971 new_bvs=1098760 new_brs=3768 age=1y1mo2d
```
Consecutive lines of "New LLR summary" show that your node is syncing.
## Use the opera admin console
```
$ docker exec -it $(docker ps -q --filter "name=go-opera") /bin/sh
```
then:
```
$ ./opera attach
```
and check the node info:
```
> admin.nodeInfo
```
Run `exit` twice to return to your terminal.
## Clean up
Stop all services running in the background:
```bash
$ laconic-so --stack mainnet-go-opera deploy down
```

View File

@@ -0,0 +1,9 @@
version: "1.1"
name: mainnet-opera
decription: "Fantom mainnet node"
repos:
- github.com/Fantom-foundation/go-opera@release/1.1.2-rc.5
containers:
- cerc/go-opera
pods:
- mainnet-go-opera

View File

@@ -0,0 +1,2 @@
# Laconic Mainnet Deployment (experimental)

View File

@@ -0,0 +1,57 @@
# Copyright © 2022, 2023 Cerc

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.

import click
import os
from shutil import copyfile
import sys
from .util import get_stack_config_filename, get_parsed_deployment_spec

default_spec_file_content = """stack: mainnet-laconic
data_dir: /my/path
node_name: my-node-name
"""


def make_default_deployment_dir():
    return "deployment-001"


@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
    with open(output, "w") as output_file:
        output_file.write(default_spec_file_content)


@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
    # This function fails with a useful error message if the file doesn't exist
    parsed_spec = get_parsed_deployment_spec(spec_file)
    if ctx.debug:
        print(f"parsed spec: {parsed_spec}")
    if deployment_dir is None:
        deployment_dir = make_default_deployment_dir()
    if os.path.exists(deployment_dir):
        print(f"Error: {deployment_dir} already exists")
        sys.exit(1)
    os.mkdir(deployment_dir)
    # Copy spec file and the stack file into the deployment dir
    copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
    stack_file = get_stack_config_filename(parsed_spec.stack)
    copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
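Assuming these commands are registered under the stack-orchestrator CLI (the wiring is not part of this diff), usage would look something like:

```bash
# Hypothetical invocation; the command group names depend on how init/create are registered
laconic-so --stack mainnet-laconic deploy init --output mainnet-spec.yml
laconic-so --stack mainnet-laconic deploy create --spec-file mainnet-spec.yml --deployment-dir deployment-001
```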

View File

@@ -0,0 +1,31 @@
version: "1.0"
name: mainnet-laconic
description: "Mainnet laconic node"
repos:
- cerc-io/laconicd
- lirewine/debug
- lirewine/crypto
- lirewine/gem
- lirewine/sdk
- cerc-io/laconic-sdk
- cerc-io/laconic-registry-cli
- cerc-io/laconic-console
npms:
- laconic-sdk
- laconic-registry-cli
- debug
- crypto
- sdk
- gem
- laconic-console
containers:
- cerc/laconicd
- cerc/laconic-registry-cli
- cerc/laconic-console-host
pods:
- mainnet-laconicd
- fixturenet-laconic-console
config:
  cli:
    key: laconicd.mykey
    address: laconicd.myaddress

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Dump environment variables for debugging
echo "Environment variables:"
env
# Test laconic stack
echo "Running laconic stack test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
# Test bringing the test container up and down
# with and without volume removal
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
$TEST_TARGET_SO --stack test deploy up
$TEST_TARGET_SO --stack test deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO --stack test deploy up
log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
echo "Retain volumes test: passed"
else
echo "Retain volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO --stack test deploy up
log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
echo "Delete volumes test: passed"
else
echo "Delete volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
echo "Test passed"

View File

@ -18,26 +18,6 @@ laconic-so --stack mobymask-v2 setup-repositories
NOTE: If repositories already exist and are checked out to different versions, the `setup-repositories` command will throw an error. NOTE: If repositories already exist and are checked out to different versions, the `setup-repositories` command will throw an error.
To get around this, remove the repositories mentioned below and re-run the command. To get around this, remove the repositories mentioned below and re-run the command.
Checkout to the required versions and branches in repos
```bash
# watcher-ts
cd ~/cerc/watcher-ts
git checkout v0.2.41
# mobymask-v2-watcher-ts
cd ~/cerc/mobymask-v2-watcher-ts
git checkout v0.1.1
# MobyMask
cd ~/cerc/MobyMask
git checkout v0.1.2
# Optimism
cd ~/cerc/optimism
git checkout v1.0.4
```
Build the container images: Build the container images:
```bash ```bash
@ -51,17 +31,21 @@ Deploy the stack:
* Deploy the containers: * Deploy the containers:
```bash ```bash
laconic-so --stack mobymask-v2 deploy-system up laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 up
``` ```
* List and check the health status of all the containers using `docker ps` and wait for them to be `healthy` NOTE: The `fixturenet-optimism-contracts` service takes a while to run to completion and it may restart a few times after running into errors.
NOTE: The `mobymask-app` container might not start; if the app is not running at http://localhost:3002, restart the container using its id: * To list and monitor the running containers:
```bash ```bash
docker ps -a | grep "mobymask-app" laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 ps
docker restart <CONTAINER_ID> # With status
docker ps -a
# Check logs for a container
docker logs -f <CONTAINER_ID>
``` ```
## Tests ## Tests
@ -88,11 +72,12 @@ docker ps | grep -E 'mobymask-app|peer-test-app'
### mobymask-app ### mobymask-app
The mobymask-app should be running at http://localhost:3002 * The mobymask-app should be running at http://localhost:3002
* The lxdao-mobymask-app should be running at http://localhost:3004
### peer-test-app ### peer-test-app
The peer-test-app should be running at http://localhost:3003 * The peer-test-app should be running at http://localhost:3003
## Details ## Details
@ -111,15 +96,15 @@ Follow the [demo](./demo.md) to try out the MobyMask app with L2 chain
Stop all the services running in background run: Stop all the services running in background run:
```bash ```bash
laconic-so --stack mobymask-v2 deploy-system down 30 laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 down 30
``` ```
Clear volumes created by this stack: Clear volumes created by this stack:
```bash ```bash
# List all relevant volumes # List all relevant volumes
docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data" docker volume ls -q --filter "name=mobymask_v2"
# Remove all the listed volumes # Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment|.*l1_deployment|.*l2_accounts|.*l2_config|.*l2_geth_data") docker volume rm $(docker volume ls -q --filter "name=mobymask_v2")
``` ```

View File

@ -9,23 +9,23 @@
The invite link is seen at the end of the logs. Example log: The invite link is seen at the end of the logs. Example log:
```bash ```bash
laconic-bfb01caf98b1b8f7c8db4d33f11b905a-mobymask-1 | http://127.0.0.1:3002/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D http://127.0.0.1:3004/#/members?invitation=%7B%22v%22%3A1%2C%22signedDelegations%22%3A%5B%7B%22signature%22%3A%220x7559bd412f02677d60820e38243acf61547f79339395a34f7d4e1630e645aeb30535fc219f79b6fbd3af0ce3bd05132ad46d2b274a9fbc4c36bc71edd09850891b%22%2C%22delegation%22%3A%7B%22delegate%22%3A%220xc0838c92B2b71756E0eAD5B3C1e1F186baeEAAac%22%2C%22authority%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%2C%22caveats%22%3A%5B%7B%22enforcer%22%3A%220x558024C7d593B840E1BfD83E9B287a5CDad4db15%22%2C%22terms%22%3A%220x0000000000000000000000000000000000000000000000000000000000000000%22%7D%5D%7D%7D%5D%2C%22key%22%3A%220x98da9805821f1802196443e578fd32af567bababa0a249c07c82df01ecaa7d8d%22%7D
``` ```
* Open the invite link in a browser to use the mobymask-app. * Open the invite link in a browser to use the mobymask-app.
NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3002 to remove old invitations NOTE: Before opening the invite link, clear the browser cache (local storage) for http://127.0.0.1:3004 to remove old invitations
* In the debug panel, check if it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work). * In the debug panel, check if it is connected to the p2p network (it should be connected to at least one other peer for pubsub to work).
* Create an invite link in the app by clicking on `Create new invite link` button. * Create an invite link in the app by clicking on `Create new invite link` button in the `My invitees` section.
* Switch to the `MESSAGES` tab in debug panel for viewing incoming messages later. * Switch to the `MESSAGES` tab in debug panel for viewing incoming messages later.
* Open the invite link in a new browser with a different profile (to simulate a remote browser) * Open the invite link in a new browser with a different profile (to simulate a remote browser)
* Check that it is connected to any other peer in the network. * Check that it is connected to a peer in the network.
* In `Report a phishing attempt` section, report multiple phishers using the `Submit` button. Click on the `Submit batch to p2p network` button. This broadcasts signed invocations to the connected peers. * In the `Pending reports` section, enter multiple phisher records and click on the `Submit batch to p2p network` button. This broadcasts signed invocations to the connected peers.
* In the `MESSAGES` tab of other browsers, a message can be seen with the signed invocations. * In the `MESSAGES` tab of other browsers, a message can be seen with the signed invocations.
@ -66,7 +66,7 @@
* Get the deployed contract address: * Get the deployed contract address:
```bash ```bash
docker exec -it $(docker ps -aq --filter name="mobymask-app") cat /config/config.yml docker exec -it $(docker ps -aq --filter name="lxdao-mobymask-app") cat /config/config.yml
``` ```
The value of `address` field is the deployed contract address The value of `address` field is the deployed contract address
@ -91,15 +91,14 @@
* Watcher internally is using L2 chain `eth_getStorageAt` method. * Watcher internally is using L2 chain `eth_getStorageAt` method.
* Check the phisher name in mobymask app in `Check Phisher Status` section. * Check the phisher name in mobymask app in `Check Phisher Status` section.
* Watcher GQL API is used for checking phisher. * Watcher GQL API is used for checking phisher.
* Manage the invitations by clicking on the `Outstanding Invitations in p2p network`. * Manage invitations in the `Outstanding invitations (p2p network)` tab in `My Invitations` section.
* Revoke the created invitation by clicking on the `Revoke` button.
* Revoke the created invitation by clicking on `Revoke (p2p network)`
* Revocation messages can be seen in the debug panel `MESSAGES` tab of other browsers. * Revocation messages can be seen in the debug panel `MESSAGES` tab of other browsers.
* Check the watcher peer logs. It should receive a message and log the transaction receipt for a `revoke` message. * Also, check the watcher peer logs. It should receive a message and log the transaction receipt for a `revoke` message.
* Try reporting a phisher from the revoked invitee's browser. * Try reporting a phisher from the revoked invitee's browser.

View File

@ -14,22 +14,6 @@ laconic-so --stack mobymask-v2 setup-repositories --include github.com/cerc-io/M
# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command # If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
``` ```
Checkout to the required versions and branches in repos:
```bash
# watcher-ts
cd ~/cerc/watcher-ts
git checkout v0.2.41
# mobymask-v2-watcher-ts
cd ~/cerc/mobymask-v2-watcher-ts
git checkout v0.1.1
# MobyMask
cd ~/cerc/MobyMask
git checkout v0.1.2
```
Build the container images: Build the container images:
```bash ```bash
@ -65,7 +49,7 @@ Create and update an env file to be used in the next step ([defaults](../../conf
# Base URI for mobymask-app # Base URI for mobymask-app
# (used for generating a root invite link after deploying the contract) # (used for generating a root invite link after deploying the contract)
CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#" CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3004/#"
# (Optional) Domain to be used in the relay node's announce address # (Optional) Domain to be used in the relay node's announce address
CERC_RELAY_ANNOUNCE_DOMAIN= CERC_RELAY_ANNOUNCE_DOMAIN=
@ -88,16 +72,16 @@ Create and update an env file to be used in the next step ([defaults](../../conf
### Deploy the stack ### Deploy the stack
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 --env-file <PATH_TO_ENV_FILE> up laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 --env-file <PATH_TO_ENV_FILE> up
``` ```
To list and monitor the running containers: To list and monitor the running containers:
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 ps laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 ps
# With status # With status
docker ps docker ps -a
# Check logs for a container # Check logs for a container
docker logs -f <CONTAINER_ID> docker logs -f <CONTAINER_ID>
@ -124,15 +108,15 @@ For deploying the web-app(s) separately after deploying the watcher, follow [web
Stop all services running in the background: Stop all services running in the background:
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include watcher-mobymask-v2 down laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include watcher-mobymask-v2 down
``` ```
Clear volumes created by this stack: Clear volumes created by this stack:
```bash ```bash
# List all relevant volumes # List all relevant volumes
docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment" docker volume ls -q --filter "name=mobymask_v2"
# Remove all the listed volumes # Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=.*mobymask_watcher_db_data|.*peers_ids|.*mobymask_deployment") docker volume rm $(docker volume ls -q --filter "name=mobymask_v2")
``` ```

View File

@ -2,21 +2,24 @@ version: "1.0"
name: mobymask-v2 name: mobymask-v2
repos: repos:
- github.com/cerc-io/go-ethereum - github.com/cerc-io/go-ethereum
- github.com/cerc-io/lighthouse
- github.com/dboreham/foundry - github.com/dboreham/foundry
- github.com/ethereum-optimism/optimism - github.com/ethereum-optimism/optimism@v1.0.4
- github.com/ethereum-optimism/op-geth - github.com/ethereum-optimism/op-geth@v1.101105.2
- github.com/cerc-io/watcher-ts - github.com/cerc-io/watcher-ts@v0.2.43
- github.com/cerc-io/mobymask-v2-watcher-ts - github.com/cerc-io/mobymask-v2-watcher-ts@v0.1.1
- github.com/cerc-io/MobyMask - github.com/cerc-io/MobyMask@v0.1.2
containers: containers:
- cerc/go-ethereum - cerc/go-ethereum
- cerc/lighthouse - cerc/lighthouse
- cerc/lighthouse-cli
- cerc/fixturenet-eth-geth - cerc/fixturenet-eth-geth
- cerc/fixturenet-eth-lighthouse - cerc/fixturenet-eth-lighthouse
- cerc/foundry - cerc/foundry
- cerc/optimism-contracts - cerc/optimism-contracts
- cerc/optimism-l2geth - cerc/optimism-l2geth
- cerc/optimism-op-batcher - cerc/optimism-op-batcher
- cerc/optimism-op-proposer
- cerc/optimism-op-node - cerc/optimism-op-node
- cerc/watcher-ts - cerc/watcher-ts
- cerc/watcher-mobymask-v2 - cerc/watcher-mobymask-v2

View File

@ -30,22 +30,6 @@ Clone required repositories:
# 100%|##############################################################################################################################################| 1.41k/1.41k [00:18<00:00, 76.4B/s] # 100%|##############################################################################################################################################| 1.41k/1.41k [00:18<00:00, 76.4B/s]
``` ```
Checkout to the required versions and branches in repos:
```bash
# watcher-ts
cd ~/cerc/watcher-ts
git checkout v0.2.41
# mobymask-v2-watcher-ts
cd ~/cerc/mobymask-v2-watcher-ts
git checkout v0.1.1
# MobyMask
cd ~/cerc/MobyMask
git checkout v0.1.2
```
Build the container images: Build the container images:
```bash ```bash
@ -126,8 +110,8 @@ To list down and monitor the running containers:
# Expected output: # Expected output:
# Running containers: # Running containers:
# id: 25cc3a1cbda27fcd9c2ad4c772bd753ccef1e178f901a70e6ff4191d4a8684e9, name: mobymask_v2-mobymask-watcher-db-1, ports: 0.0.0.0:15432->5432/tcp # id: 25cc3a1cbda27fcd9c2ad4c772bd753ccef1e178f901a70e6ff4191d4a8684e9, name: mobymask_v2-mobymask-watcher-db-1, ports: 127.0.0.1:15432->5432/tcp
# id: c9806f78680d68292ffe942222af2003aa3ed5d5c69d7121b573f5028444391d, name: mobymask_v2-mobymask-watcher-server-1, ports: 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp # id: c9806f78680d68292ffe942222af2003aa3ed5d5c69d7121b573f5028444391d, name: mobymask_v2-mobymask-watcher-server-1, ports: 127.0.0.1:3001->3001/tcp, 127.0.0.1:9001->9001/tcp, 127.0.0.1:9090->9090/tcp
# id: 6b30a1d313a88fb86f8a3b37a1b1a3bc053f238664e4b2d196c3ec74e04faf13, name: mobymask_v2-peer-tests-1, ports: # id: 6b30a1d313a88fb86f8a3b37a1b1a3bc053f238664e4b2d196c3ec74e04faf13, name: mobymask_v2-peer-tests-1, ports:
@ -138,8 +122,8 @@ To list down and monitor the running containers:
# CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES # CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
# 6b30a1d313a8 cerc/watcher-ts:local "docker-entrypoint.s…" 5 minutes ago Up 4 minutes mobymask_v2-peer-tests-1 # 6b30a1d313a8 cerc/watcher-ts:local "docker-entrypoint.s…" 5 minutes ago Up 4 minutes mobymask_v2-peer-tests-1
# c9806f78680d cerc/watcher-mobymask-v2:local "sh start-server.sh" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:3001->3001/tcp, 0.0.0.0:9001->9001/tcp, 0.0.0.0:9090->9090/tcp mobymask_v2-mobymask-watcher-server-1 # c9806f78680d cerc/watcher-mobymask-v2:local "sh start-server.sh" 5 minutes ago Up 5 minutes (healthy) 127.0.0.1:3001->3001/tcp, 127.0.0.1:9001->9001/tcp, 127.0.0.1:9090->9090/tcp mobymask_v2-mobymask-watcher-server-1
# 25cc3a1cbda2 postgres:14-alpine "docker-entrypoint.s…" 5 minutes ago Up 5 minutes (healthy) 0.0.0.0:15432->5432/tcp mobymask_v2-mobymask-watcher-db-1 # 25cc3a1cbda2 postgres:14-alpine "docker-entrypoint.s…" 5 minutes ago Up 5 minutes (healthy) 127.0.0.1:15432->5432/tcp mobymask_v2-mobymask-watcher-db-1
# Check logs for a container # Check logs for a container

View File

@ -78,7 +78,7 @@ To monitor the running container:
# Expected output: # Expected output:
# CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES # CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
# f1369dbae1c9 cerc/mobymask-ui:local "docker-entrypoint.s…" 2 minutes ago Up 2 minutes (healthy) 0.0.0.0:3004->80/tcp mm_v2-lxdao-mobymask-app-1 # f1369dbae1c9 cerc/mobymask-ui:local "docker-entrypoint.s…" 2 minutes ago Up 2 minutes (healthy) 127.0.0.1:3004->80/tcp mm_v2-lxdao-mobymask-app-1
# Check logs for a container # Check logs for a container
docker logs -f mm_v2-lxdao-mobymask-app-1 docker logs -f mm_v2-lxdao-mobymask-app-1

View File

@ -47,14 +47,14 @@ Create and update an env file to be used in the next step ([defaults](../../conf
For running mobymask-app For running mobymask-app
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include mobymask-app --env-file <PATH_TO_ENV_FILE> up laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include mobymask-app --env-file <PATH_TO_ENV_FILE> up
# Runs mobymask-app on host port 3002 and lxdao-mobymask-app on host port 3004 # Runs mobymask-app on host port 3002 and lxdao-mobymask-app on host port 3004
``` ```
For running peer-test-app For running peer-test-app
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include peer-test-app --env-file <PATH_TO_ENV_FILE> up laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include peer-test-app --env-file <PATH_TO_ENV_FILE> up
# Runs on host port 3003 # Runs on host port 3003
``` ```
@ -62,9 +62,10 @@ laconic-so --stack mobymask-v2 deploy --include peer-test-app --env-file <PATH_T
To list and monitor the running containers: To list and monitor the running containers:
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include [mobymask-app | peer-test-app] ps laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include [mobymask-app | peer-test-app] ps
docker ps # With status
docker ps -a
# Check logs for a container # Check logs for a container
docker logs -f <CONTAINER_ID> docker logs -f <CONTAINER_ID>
@ -80,20 +81,20 @@ Stop all services running in the background:
For mobymask-app For mobymask-app
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include mobymask-app down laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include mobymask-app down
``` ```
For peer-test-app For peer-test-app
```bash ```bash
laconic-so --stack mobymask-v2 deploy --include peer-test-app down laconic-so --stack mobymask-v2 deploy --cluster mobymask_v2 --include peer-test-app down
``` ```
Clear volumes created by this stack: Clear volumes created by this stack:
```bash ```bash
# List all relevant volumes # List all relevant volumes
docker volume ls -q --filter "name=.*mobymask_deployment|.*peers_ids" docker volume ls -q --filter "name=mobymask_v2"
# Remove all the listed volumes # Remove all the listed volumes
docker volume rm $(docker volume ls -q --filter "name=.*mobymask_deployment|.*peers_ids") docker volume rm $(docker volume ls -q --filter "name=mobymask_v2")
``` ```

View File

@ -0,0 +1,65 @@
# Reth
Deploy a Reth API node alongside Lighthouse.
## Clone required repositories
```
$ laconic-so --stack reth setup-repositories
```
## Build the Reth stack containers
```
$ laconic-so --stack reth build-containers
```
## Deploy the stack
```
$ laconic-so --stack reth deploy up
```
## Check logs
```
$ laconic-so --stack reth deploy logs
```
Verify that your node is syncing. You should see entries similar to this from the Lighthouse container:
```
laconic-200e8f8ff7891515d777cd0f719078e3-lighthouse-1 | Jun 23 20:59:01.226 INFO New block received root: 0x9cd4a2dd9333cf802c2963c2f029deb0f94e511d2481fa0724ae8752e4c49b15, slot: 6727493
```
and entries similar to this from the Reth container:
```
laconic-200e8f8ff7891515d777cd0f719078e3-reth-1 | 2023-06-23T20:59:11.557389Z INFO reth::node::events: Stage committed progress pipeline_stages=1/13 stage=Headers block=0 checkpoint=4.9% eta=1h 3m 57s
```
## Test the API
Reth's HTTP API is accessible on port `8545` and the WebSocket API on port `8546`.
```
$ curl --request POST \
--url http://localhost:8545/ \
--header 'Content-Type: application/json' \
--data '{
"jsonrpc": "2.0",
"method": "eth_blockNumber",
"params": [],
"id": 0
}'
# Response
{"jsonrpc":"2.0","result":"0x0","id":0}
```
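The WebSocket endpoint can be exercised the same way. Below is a minimal sketch using the Python `websockets` package (the package choice and script are illustrative, not part of this stack):

```python
# Query eth_blockNumber over Reth's websocket API on port 8546.
# Assumes `pip install websockets` has been run on the host.
import asyncio
import json

import websockets

async def main():
    async with websockets.connect("ws://localhost:8546") as ws:
        await ws.send(json.dumps(
            {"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 0}))
        print(await ws.recv())  # e.g. {"jsonrpc":"2.0","result":"0x0","id":0}

asyncio.run(main())
```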
## Clean up
Stop all services running in the background:
```bash
$ laconic-so --stack reth deploy down
```
To also delete the docker data volumes:
```bash
$ laconic-so --stack reth deploy down --delete-volumes
```

View File

@ -0,0 +1,10 @@
version: "1.1"
name: reth
description: "Reth node"
repos:
- github.com/paradigmxyz/reth
containers:
- cerc/reth
- cerc/lighthouse
pods:
- reth

View File

@ -21,12 +21,15 @@ import os
import sys import sys
from dataclasses import dataclass from dataclasses import dataclass
from decouple import config from decouple import config
from importlib import resources
import subprocess import subprocess
from python_on_whales import DockerClient, DockerException from python_on_whales import DockerClient, DockerException
import click import click
import importlib.resources
from pathlib import Path from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config from .util import include_exclude_check, get_parsed_stack_config, global_options2
from .deployment_create import create as deployment_create
from .deployment_create import init as deployment_init
class DeployCommandContext(object): class DeployCommandContext(object):
def __init__(self, cluster_context, docker): def __init__(self, cluster_context, docker):
@ -43,44 +46,40 @@ class DeployCommandContext(object):
def command(ctx, include, exclude, env_file, cluster): def command(ctx, include, exclude, env_file, cluster):
'''deploy a stack''' '''deploy a stack'''
cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file) if ctx.parent.obj.debug:
print(f"ctx.parent.obj: {ctx.parent.obj}")
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/ ctx.obj = create_deploy_context(global_options2(ctx), global_options2(ctx).stack, include, exclude, cluster, env_file)
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
ctx.obj = DeployCommandContext(cluster_context, docker)
# Subcommand is executed now, by the magic of click # Subcommand is executed now, by the magic of click
@command.command() def create_deploy_context(global_context, stack, include, exclude, cluster, env_file):
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2> cluster_context = _make_cluster_context(global_context, stack, include, exclude, cluster, env_file)
@click.pass_context # See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
def up(ctx, extra_args): docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
compose_env_file=cluster_context.env_file)
return DeployCommandContext(cluster_context, docker)
def up_operation(ctx, services_list):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None deploy_context = ctx.obj
if not global_context.dry_run: if not global_context.dry_run:
cluster_context = ctx.obj.cluster_context cluster_context = deploy_context.cluster_context
container_exec_env = _make_runtime_env(global_context) container_exec_env = _make_runtime_env(global_context)
for attr, value in container_exec_env.items(): for attr, value in container_exec_env.items():
os.environ[attr] = value os.environ[attr] = value
if global_context.verbose: if global_context.verbose:
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}") print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {services_list}")
for pre_start_command in cluster_context.pre_start_commands: for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command) _run_command(global_context, cluster_context.cluster, pre_start_command)
ctx.obj.docker.compose.up(detach=True, services=extra_args_list) deploy_context.docker.compose.up(detach=True, services=services_list)
for post_start_command in cluster_context.post_start_commands: for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command) _run_command(global_context, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(global_context, cluster_context.config, ctx.obj.docker, container_exec_env) _orchestrate_cluster_config(global_context, cluster_context.config, deploy_context.docker, container_exec_env)
@command.command() def down_operation(ctx, delete_volumes, extra_args_list):
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
@click.pass_context
def down(ctx, delete_volumes, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
if global_context.verbose: if global_context.verbose:
print("Running compose down") print("Running compose down")
@ -91,9 +90,7 @@ def down(ctx, delete_volumes, extra_args):
ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes) ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)
@command.command() def ps_operation(ctx):
@click.pass_context
def ps(ctx):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
if not global_context.dry_run: if not global_context.dry_run:
if global_context.verbose: if global_context.verbose:
@ -118,10 +115,7 @@ def ps(ctx):
print("No containers running") print("No containers running")
@command.command() def port_operation(ctx, extra_args):
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
@ -136,10 +130,7 @@ def port(ctx, extra_args):
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}") print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@command.command() def exec_operation(ctx, extra_args):
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
@ -157,10 +148,7 @@ def exec(ctx, extra_args):
print(f"container command returned error exit status") print(f"container command returned error exit status")
@command.command() def logs_operation(ctx, extra_args):
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
global_context = ctx.parent.parent.obj global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None extra_args_list = list(extra_args) or None
if not global_context.dry_run: if not global_context.dry_run:
@ -170,12 +158,56 @@ def logs(ctx, extra_args):
print(logs_output) print(logs_output)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
extra_args_list = list(extra_args) or None
up_operation(ctx, extra_args_list)
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
@click.pass_context
def down(ctx, delete_volumes, extra_args):
extra_args_list = list(extra_args) or None
down_operation(ctx, delete_volumes, extra_args_list)
@command.command()
@click.pass_context
def ps(ctx):
ps_operation(ctx)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
port_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
exec_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
logs_operation(ctx, extra_args)
def get_stack_status(ctx, stack): def get_stack_status(ctx, stack):
ctx_copy = copy.copy(ctx) ctx_copy = copy.copy(ctx)
ctx_copy.stack = stack ctx_copy.stack = stack
cluster_context = _make_cluster_context(ctx_copy, None, None, None, None) cluster_context = _make_cluster_context(ctx_copy, stack, None, None, None, None)
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster) docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
# TODO: refactor to avoid duplicating this code above # TODO: refactor to avoid duplicating this code above
if ctx.verbose: if ctx.verbose:
@ -200,7 +232,8 @@ def _make_runtime_env(ctx):
return container_exec_env return container_exec_env
def _make_cluster_context(ctx, include, exclude, cluster, env_file): # stack has to be either a PathLike pointing to a stack yml file, or a string with the name of a known stack
def _make_cluster_context(ctx, stack, include, exclude, cluster, env_file):
if ctx.local_stack: if ctx.local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")] dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@ -208,14 +241,20 @@ def _make_cluster_context(ctx, include, exclude, cluster, env_file):
else: else:
dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc")) dev_root_path = os.path.expanduser(config("CERC_REPO_BASE_DIR", default="~/cerc"))
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure # TODO: huge hack, fix this
compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose") # If the caller passed a path for the stack file, then we know that we can get the compose files
# from the same directory
if isinstance(stack, os.PathLike):
compose_dir = stack.parent.joinpath("compose")
else:
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
compose_dir = Path(__file__).absolute().parent.joinpath("data", "compose")
if cluster is None: if cluster is None:
# Create default unique, stable cluster name from config file path and stack name if provided # Create default unique, stable cluster name from config file path and stack name if provided
# TODO: change this to the config file path # TODO: change this to the config file path
path = os.path.realpath(sys.argv[0]) path = os.path.realpath(sys.argv[0])
unique_cluster_descriptor = f"{path},{ctx.stack},{include},{exclude}" unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
if ctx.debug: if ctx.debug:
print(f"pre-hash descriptor: {unique_cluster_descriptor}") print(f"pre-hash descriptor: {unique_cluster_descriptor}")
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest() hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
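For reference, the default cluster name derivation above boils down to something like this (a minimal illustration; the `laconic-` prefix matches the container names seen in the Reth logs earlier, but is an assumption here):

```python
import hashlib

# Example stand-ins for the real invocation values
path, stack, include, exclude = "/usr/local/bin/laconic-so", "fixturenet-eth", None, None
unique_cluster_descriptor = f"{path},{stack},{include},{exclude}"
hash = hashlib.md5(unique_cluster_descriptor.encode()).hexdigest()
print(f"laconic-{hash}")  # prefix assumed for illustration
```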
@ -225,12 +264,12 @@ def _make_cluster_context(ctx, include, exclude, cluster, env_file):
# See: https://stackoverflow.com/a/20885799/1701505 # See: https://stackoverflow.com/a/20885799/1701505
from . import data from . import data
with importlib.resources.open_text(data, "pod-list.txt") as pod_list_file: with resources.open_text(data, "pod-list.txt") as pod_list_file:
all_pods = pod_list_file.read().splitlines() all_pods = pod_list_file.read().splitlines()
pods_in_scope = [] pods_in_scope = []
if ctx.stack: if stack:
stack_config = get_parsed_stack_config(ctx.stack) stack_config = get_parsed_stack_config(stack)
# TODO: syntax check the input here # TODO: syntax check the input here
pods_in_scope = stack_config['pods'] pods_in_scope = stack_config['pods']
cluster_config = stack_config['config'] if 'config' in stack_config else None cluster_config = stack_config['config'] if 'config' in stack_config else None
@ -342,6 +381,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
f" = {pd.source_container}.{pd.source_variable}") f" = {pd.source_container}.{pd.source_variable}")
# TODO: add a timeout # TODO: add a timeout
waiting_for_data = True waiting_for_data = True
destination_output = "*** no output received yet ***"
while waiting_for_data: while waiting_for_data:
# TODO: fix the script paths so they're consistent between containers # TODO: fix the script paths so they're consistent between containers
source_value = None source_value = None
@ -376,3 +416,7 @@ def _orchestrate_cluster_config(ctx, cluster_config, docker, container_exec_env)
waiting_for_data = False waiting_for_data = False
if ctx.debug: if ctx.debug:
print(f"destination output: {destination_output}") print(f"destination output: {destination_output}")
command.add_command(deployment_init)
command.add_command(deployment_create)
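A side effect of this refactor is that deploy operations can be driven outside of click. A hedged sketch (the options object and its attribute set are assumptions mirroring the CLI's globals):

```python
from types import SimpleNamespace

from app.deploy import create_deploy_context

# Stand-in for the CLI's parsed global options; the attribute set is an assumption
global_opts = SimpleNamespace(stack="fixturenet-eth", verbose=False, debug=False,
                              dry_run=False, local_stack=False, continue_on_error=False)
deploy_ctx = create_deploy_context(global_opts, global_opts.stack, None, None, None, None)
# Reuse the python-on-whales client the context wraps
for container in deploy_ctx.docker.compose.ps():
    print(container.name, container.state.status)
```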

app/deployment.py Normal file
View File

@ -0,0 +1,140 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from pathlib import Path
import sys
from .deploy import up_operation, down_operation, ps_operation, port_operation, exec_operation, logs_operation, create_deploy_context
from .util import global_options
@dataclass
class DeploymentContext:
dir: Path
@click.group()
@click.option("--dir", required=True, help="path to deployment directory")
@click.pass_context
def command(ctx, dir):
# Check that --stack wasn't supplied
if ctx.parent.obj.stack:
print("Error: --stack can't be supplied with the deployment command")
sys.exit(1)
# Check dir is valid
dir_path = Path(dir)
if not dir_path.exists():
print(f"Error: deployment directory {dir} does not exist")
sys.exit(1)
if not dir_path.is_dir():
print(f"Error: supplied deployment directory path {dir} exists but is a file not a directory")
sys.exit(1)
# Store the deployment context for subcommands
ctx.obj = DeploymentContext(dir_path)
def make_deploy_context(ctx):
# Get the stack config file name
stack_file_path = ctx.obj.dir.joinpath("stack.yml")
# TODO: add cluster name and env file here
return create_deploy_context(ctx.parent.parent.obj, stack_file_path, None, None, None, None)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
ctx.obj = make_deploy_context(ctx)
services_list = list(extra_args) or None
up_operation(ctx, services_list)
# start is the preferred alias for up
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def start(ctx, extra_args):
ctx.obj = make_deploy_context(ctx)
services_list = list(extra_args) or None
up_operation(ctx, services_list)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
@click.pass_context
def down(ctx, extra_args):
# Get the stack config file name
# TODO: add cluster name and env file here
ctx.obj = make_deploy_context(ctx)
down_operation(ctx, delete_volumes=False, extra_args_list=list(extra_args) or None)
# stop is the preferred alias for down
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: down <service1> <service2>
@click.pass_context
def stop(ctx, extra_args):
# TODO: add cluster name and env file here
ctx.obj = make_deploy_context(ctx)
down_operation(ctx, delete_volumes=False, extra_args_list=list(extra_args) or None)
@command.command()
@click.pass_context
def ps(ctx):
ctx.obj = make_deploy_context(ctx)
ps_operation(ctx)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
port_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
ctx.obj = make_deploy_context(ctx)
exec_operation(ctx, extra_args)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
ctx.obj = make_deploy_context(ctx)
logs_operation(ctx, extra_args)
@command.command()
@click.pass_context
def status(ctx):
print(f"Context: {ctx.parent.obj}")
#from importlib import resources, util
# TODO: figure out how to do this dynamically
#stack = "mainnet-laconic"
#module_name = "commands"
#spec = util.spec_from_file_location(module_name, "./app/data/stacks/" + stack + "/deploy/commands.py")
#imported_stack = util.module_from_spec(spec)
#spec.loader.exec_module(imported_stack)
#command.add_command(imported_stack.init)
#command.add_command(imported_stack.create)
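For reference, the dynamic per-stack command loading that the commented-out block above is reaching for might look like the following (a sketch; the file layout is taken from the comments and remains an assumption):

```python
from importlib import util

def load_stack_commands(stack: str):
    # Load ./app/data/stacks/<stack>/deploy/commands.py as a module
    spec = util.spec_from_file_location(
        "commands", f"./app/data/stacks/{stack}/deploy/commands.py")
    module = util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# stack_commands = load_stack_commands("mainnet-laconic")
# command.add_command(stack_commands.init)
# command.add_command(stack_commands.create)
```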

app/deployment_create.py Normal file
View File

@ -0,0 +1,155 @@
# Copyright © 2022, 2023 Cerc
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import click
import os
from pathlib import Path
from shutil import copyfile, copytree
import sys
import ruamel.yaml
from .util import get_stack_file_path, get_parsed_deployment_spec, get_parsed_stack_config, global_options
def _get_yaml():
# See: https://stackoverflow.com/a/45701840/1701505
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.indent(sequence=3, offset=1)
return yaml
def _make_default_deployment_dir():
return "deployment-001"
def _get_compose_file_dir():
# TODO: refactor to use common code with deploy command
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
data_dir = Path(__file__).absolute().parent.joinpath("data")
source_compose_dir = data_dir.joinpath("compose")
return source_compose_dir
def _get_named_volumes(stack):
# Parse the compose files looking for named volumes
named_volumes = []
parsed_stack = get_parsed_stack_config(stack)
pods = parsed_stack["pods"]
yaml = _get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
if "volumes" in parsed_pod_file:
volumes = parsed_pod_file["volumes"]
for volume in volumes.keys():
# Volume definition looks like:
# 'laconicd-data': None
named_volumes.append(volume)
return named_volumes
# If we're mounting a volume from a relative path, then we
# assume the directory doesn't exist yet and create it
# so the deployment will start
# Also warn if the path is absolute and doesn't exist
def _create_bind_dir_if_relative(volume, path_string, compose_dir):
path = Path(path_string)
if not path.is_absolute():
absolute_path = Path(compose_dir).parent.joinpath(path)
absolute_path.mkdir(parents=True, exist_ok=True)
else:
if not path.exists():
print(f"WARNING: mount path for volume {volume} does not exist: {path_string}")
# See: https://stackoverflow.com/questions/45699189/editing-docker-compose-yml-with-pyyaml
def _fixup_pod_file(pod, spec, compose_dir):
# Fix up volumes
if "volumes" in spec:
spec_volumes = spec["volumes"]
if "volumes" in pod:
pod_volumes = pod["volumes"]
for volume in pod_volumes.keys():
if volume in spec_volumes:
volume_spec = spec_volumes[volume]
volume_spec_fixedup = volume_spec if Path(volume_spec).is_absolute() else f".{volume_spec}"
_create_bind_dir_if_relative(volume, volume_spec, compose_dir)
new_volume_spec = {"driver": "local",
"driver_opts": {
"type": "none",
"device": volume_spec_fixedup,
"o": "bind"
}
}
pod["volumes"][volume] = new_volume_spec
@click.command()
@click.option("--output", required=True, help="Write yaml spec file here")
@click.pass_context
def init(ctx, output):
yaml = _get_yaml()
stack = global_options(ctx).stack
verbose = global_options(ctx).verbose
spec_file_content = {"stack": stack}
if verbose:
print(f"Creating spec file for stack: {stack}")
named_volumes = _get_named_volumes(stack)
if named_volumes:
volume_descriptors = {}
for named_volume in named_volumes:
volume_descriptors[named_volume] = f"../data/{named_volume}"
spec_file_content["volumes"] = volume_descriptors
with open(output, "w") as output_file:
yaml.dump(spec_file_content, output_file)
@click.command()
@click.option("--spec-file", required=True, help="Spec file to use to create this deployment")
@click.option("--deployment-dir", help="Create deployment files in this directory")
@click.pass_context
def create(ctx, spec_file, deployment_dir):
# This function fails with a useful error message if the file doesn't exist
parsed_spec = get_parsed_deployment_spec(spec_file)
stack_name = parsed_spec['stack']
stack_file = get_stack_file_path(stack_name)
parsed_stack = get_parsed_stack_config(stack_name)
if global_options(ctx).debug:
print(f"parsed spec: {parsed_spec}")
if deployment_dir is None:
deployment_dir = _make_default_deployment_dir()
if os.path.exists(deployment_dir):
print(f"Error: {deployment_dir} already exists")
sys.exit(1)
os.mkdir(deployment_dir)
# Copy spec file and the stack file into the deployment dir
copyfile(spec_file, os.path.join(deployment_dir, os.path.basename(spec_file)))
copyfile(stack_file, os.path.join(deployment_dir, os.path.basename(stack_file)))
# Copy the pod files into the deployment dir, fixing up content
pods = parsed_stack['pods']
destination_compose_dir = os.path.join(deployment_dir, "compose")
os.mkdir(destination_compose_dir)
data_dir = Path(__file__).absolute().parent.joinpath("data")
yaml = _get_yaml()
for pod in pods:
pod_file_path = os.path.join(_get_compose_file_dir(), f"docker-compose-{pod}.yml")
parsed_pod_file = yaml.load(open(pod_file_path, "r"))
_fixup_pod_file(parsed_pod_file, parsed_spec, destination_compose_dir)
with open(os.path.join(destination_compose_dir, os.path.basename(pod_file_path)), "w") as output_file:
yaml.dump(parsed_pod_file, output_file)
# Copy the config files for the pod, if any
source_config_dir = data_dir.joinpath("config", pod)
if os.path.exists(source_config_dir):
copytree(source_config_dir, os.path.join(deployment_dir, "config", pod))
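A small post-`create` sanity check (a sketch; the file names follow the copy operations above):

```python
from pathlib import Path

def check_deployment_dir(deployment_dir: str, spec_file_name: str = "spec.yml"):
    d = Path(deployment_dir)
    assert (d / spec_file_name).exists(), "spec file was not copied"
    assert (d / "stack.yml").exists(), "stack file was not copied"
    assert (d / "compose").is_dir(), "compose dir was not created"
    print(f"{deployment_dir}: basic deployment layout present")

# check_deployment_dir("deployment-001")
```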

View File

@ -69,8 +69,26 @@ def host_and_path_for_repo(fully_qualified_repo):
return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
# See: https://stackoverflow.com/questions/18659425/get-git-current-branch-tag-name
def _get_repo_current_branch_or_tag(full_filesystem_repo_path):
current_repo_branch_or_tag = "***UNDETERMINED***"
is_branch = False
try:
current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).active_branch.name
is_branch = True
except TypeError as error:
# This means that the current ref is not a branch, so possibly a tag
# Let's try to get the tag
current_repo_branch_or_tag = git.Repo(full_filesystem_repo_path).git.describe("--tags", "--exact-match")
# Note that git is asymmetric -- the tag you told it to check out may not be the one
# you get back here (if there are multiple tags associated with the same commit)
return current_repo_branch_or_tag, is_branch
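Usage is straightforward; a short sketch with a hypothetical repo path:

```python
# Hypothetical path; returns e.g. ("v0.1.2", False) for a tag checkout
ref, is_branch = _get_repo_current_branch_or_tag("/home/me/cerc/MobyMask")
print(f"checked out {'branch' if is_branch else 'tag'}: {ref}")
```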
# TODO: fix the messy arg list here # TODO: fix the messy arg list here
def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo): def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
if verbose:
print(f"Processing repo: {fully_qualified_repo}")
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo) repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
git_ssh_prefix = f"git@{repo_host}:" git_ssh_prefix = f"git@{repo_host}:"
git_http_prefix = f"https://{repo_host}/" git_http_prefix = f"https://{repo_host}/"
@ -78,9 +96,9 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
repoName = repo_path.split("/")[-1] repoName = repo_path.split("/")[-1]
full_filesystem_repo_path = os.path.join(dev_root_path, repoName) full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
is_present = os.path.isdir(full_filesystem_repo_path) is_present = os.path.isdir(full_filesystem_repo_path)
current_repo_branch = git.Repo(full_filesystem_repo_path).active_branch.name if is_present else None (current_repo_branch_or_tag, is_branch) = _get_repo_current_branch_or_tag(full_filesystem_repo_path) if is_present else (None, None)
if not quiet: if not quiet:
present_text = f"already exists active branch: {current_repo_branch}" if is_present \ present_text = f"already exists active {'branch' if is_branch else 'tag'}: {current_repo_branch_or_tag}" if is_present \
else 'Needs to be fetched' else 'Needs to be fetched'
print(f"Checking: {full_filesystem_repo_path}: {present_text}") print(f"Checking: {full_filesystem_repo_path}: {present_text}")
# Quick check that it's actually a repo # Quick check that it's actually a repo
@ -93,9 +111,12 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
if verbose: if verbose:
print(f"Running git pull for {full_filesystem_repo_path}") print(f"Running git pull for {full_filesystem_repo_path}")
if not check_only: if not check_only:
git_repo = git.Repo(full_filesystem_repo_path) if is_branch:
origin = git_repo.remotes.origin git_repo = git.Repo(full_filesystem_repo_path)
origin.pull(progress=None if quiet else GitProgress()) origin = git_repo.remotes.origin
origin.pull(progress=None if quiet else GitProgress())
else:
print(f"skipping pull because this repo checked out a tag")
else: else:
print("(git pull skipped)") print("(git pull skipped)")
if not is_present: if not is_present:
@ -122,14 +143,15 @@ def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_pa
branch_to_checkout = repo_branch branch_to_checkout = repo_branch
if branch_to_checkout: if branch_to_checkout:
if current_repo_branch is None or (current_repo_branch and (current_repo_branch != branch_to_checkout)): if current_repo_branch_or_tag is None or (current_repo_branch_or_tag and (current_repo_branch_or_tag != branch_to_checkout)):
if not quiet: if not quiet:
print(f"switching to branch {branch_to_checkout} in repo {repo_path}") print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
git_repo = git.Repo(full_filesystem_repo_path) git_repo = git.Repo(full_filesystem_repo_path)
# git checkout works for both branches and tags
git_repo.git.checkout(branch_to_checkout) git_repo.git.checkout(branch_to_checkout)
else: else:
if verbose: if verbose:
print(f"repo {repo_path} is already switched to branch {branch_to_checkout}") print(f"repo {repo_path} is already on branch/tag {branch_to_checkout}")
def parse_branches(branches_string): def parse_branches(branches_string):

View File

@ -30,10 +30,16 @@ def include_exclude_check(s, include, exclude):
return s not in exclude_list return s not in exclude_list
def get_parsed_stack_config(stack): def get_stack_file_path(stack):
# In order to be compatible with Python 3.8 we need to use this hack to get the path: # In order to be compatible with Python 3.8 we need to use this hack to get the path:
# See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure # See: https://stackoverflow.com/questions/25389095/python-get-path-of-root-project-structure
stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml") stack_file_path = Path(__file__).absolute().parent.joinpath("data", "stacks", stack, "stack.yml")
return stack_file_path
# Caller can pass either the name of a stack, or a path to a stack file
def get_parsed_stack_config(stack):
stack_file_path = stack if isinstance(stack, os.PathLike) else get_stack_file_path(stack)
try: try:
with stack_file_path: with stack_file_path:
stack_config = yaml.safe_load(open(stack_file_path, "r")) stack_config = yaml.safe_load(open(stack_file_path, "r"))
@ -48,3 +54,27 @@ def get_parsed_stack_config(stack):
print(f"Error: stack: {stack} does not exist") print(f"Error: stack: {stack} does not exist")
print(f"Exiting, error: {error}") print(f"Exiting, error: {error}")
sys.exit(1) sys.exit(1)
def get_parsed_deployment_spec(spec_file):
spec_file_path = Path(spec_file)
try:
with spec_file_path:
deploy_spec = yaml.safe_load(open(spec_file_path, "r"))
return deploy_spec
except FileNotFoundError as error:
# We try here to generate a useful diagnostic error
print(f"Error: spec file: {spec_file_path} does not exist")
print(f"Exiting, error: {error}")
sys.exit(1)
# TODO: this is fragile with respect to the subcommand depth
# See also: https://github.com/pallets/click/issues/108
def global_options(ctx):
return ctx.parent.parent.obj
# TODO: hack
def global_options2(ctx):
return ctx.parent.obj
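With this split, `get_parsed_stack_config` accepts either a known stack name or a path to a stack file. A sketch of both call styles (the `app.util` module path is an assumption):

```python
from pathlib import Path

from app.util import get_parsed_stack_config

cfg_by_name = get_parsed_stack_config("fixturenet-eth")                  # known stack name
cfg_by_path = get_parsed_stack_config(Path("deployment-001/stack.yml"))  # explicit stack file
```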

cli.py
View File

@ -19,8 +19,9 @@ from dataclasses import dataclass
from app import setup_repositories from app import setup_repositories
from app import build_containers from app import build_containers
from app import build_npms from app import build_npms
from app import deploy_system from app import deploy
from app import version from app import version
from app import deployment
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@ -54,6 +55,7 @@ def cli(ctx, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_err
cli.add_command(setup_repositories.command, "setup-repositories") cli.add_command(setup_repositories.command, "setup-repositories")
cli.add_command(build_containers.command, "build-containers") cli.add_command(build_containers.command, "build-containers")
cli.add_command(build_npms.command, "build-npms") cli.add_command(build_npms.command, "build-npms")
cli.add_command(deploy_system.command, "deploy") # deploy is an alias for deploy-system cli.add_command(deploy.command, "deploy") # deploy is an alias for deploy-system
cli.add_command(deploy_system.command, "deploy-system") cli.add_command(deploy.command, "deploy-system")
cli.add_command(deployment.command, "deployment")
cli.add_command(version.command, "version") cli.add_command(version.command, "version")

View File

@ -4,3 +4,4 @@ tqdm>=4.64.0
python-on-whales>=0.58.0 python-on-whales>=0.58.0
click>=8.1.3 click>=8.1.3
pyyaml>=6.0 pyyaml>=6.0
ruamel.yaml>=0.17.32

View File

@ -0,0 +1,44 @@
#cloud-config
# Used for easily testing stacks-in-development on cloud platforms
# Assumes Ubuntu, edit the last line if targeting a different OS
# Once SSH'd into the server, run:
# `$ cd stack-orchestrator`
# `$ git checkout <branch>`
# `$ ./scripts/developer-mode-setup.sh`
# `$ source ./venv/bin/activate`
# Followed by the stack instructions.
package_update: true
package_upgrade: true
groups:
- docker
system_info:
default_user:
groups: [ docker ]
packages:
- apt-transport-https
- ca-certificates
- curl
- jq
- git
- gnupg
- lsb-release
- unattended-upgrades
- python3.10-venv
- pip
runcmd:
- mkdir -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- systemctl enable docker
- systemctl start docker
- git clone https://github.com/cerc-io/stack-orchestrator.git /home/ubuntu/stack-orchestrator

View File

@ -0,0 +1,35 @@
#cloud-config
# Used for installing Stack Orchestrator on platforms that support `cloud-init`
# Tested on Ubuntu
package_update: true
package_upgrade: true
groups:
- docker
system_info:
default_user:
groups: [ docker ]
packages:
- apt-transport-https
- ca-certificates
- curl
- jq
- git
- gnupg
- lsb-release
- unattended-upgrades
runcmd:
- mkdir -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
- apt-get update
- apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- systemctl enable docker
- systemctl start docker
- curl -L -o /usr/local/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
- chmod +x /usr/local/bin/laconic-so

View File

@ -3,7 +3,7 @@ set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x set -x
fi fi
set -e
echo "Running stack-orchestrator Ethereum fixturenet test" echo "Running stack-orchestrator Ethereum fixturenet test"
# Bit of a hack, test the most recent package # Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 ) TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
@ -15,11 +15,7 @@ reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}" echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR" echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
$TEST_TARGET_SO --stack fixturenet-eth setup-repositories $TEST_TARGET_SO --stack fixturenet-eth setup-repositories
echo "Building containers" $TEST_TARGET_SO --stack fixturenet-eth build-containers
$TEST_TARGET_SO --stack fixturenet-eth build-containers
echo "Images in registry:"
docker image ls
echo "Deploying the cluster"
$TEST_TARGET_SO --stack fixturenet-eth deploy up $TEST_TARGET_SO --stack fixturenet-eth deploy up
# Verify that the fixturenet is up and running # Verify that the fixturenet is up and running
$TEST_TARGET_SO --stack fixturenet-eth deploy ps $TEST_TARGET_SO --stack fixturenet-eth deploy ps