Update fixturenet eth test #506

telackey merged 54 commits from dboreham/update-fixturenet-eth-test into main on 2023-08-17 19:24:07 +00:00
45 changed files with 722 additions and 275 deletions
Showing only changes of commit 27067e061c

@@ -0,0 +1,39 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
# Needed until we can incorporate docker startup into the executor container
env:
DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: Start dockerd # Also needed until we can incorporate into the executor
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
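The dind workaround above amounts to: start a Docker daemon on a private socket inside the runner, then point every Docker client at it via `DOCKER_HOST`. A minimal sketch of the same sequence (assuming `dockerd` is present in the runner image):

```bash
export DOCKER_HOST=unix:///var/run/dind.sock
dockerd -H "$DOCKER_HOST" --userland-proxy=false &
sleep 5        # crude readiness wait; polling `docker info` in a loop is more robust
docker info    # fails loudly if the daemon did not come up
```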

@@ -1,4 +1,4 @@
-name: Integration Test
+name: Smoke Test
on:
pull_request:

.github/workflows/test-deploy.yml
@@ -0,0 +1,29 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh

@@ -1,4 +1,4 @@
-name: Test
+name: Smoke Test
on:
pull_request:

@@ -90,7 +90,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
"CERC_CONTAINER_BASE_DIR": container_build_dir,
"CERC_HOST_UID": f"{os.getuid()}",
"CERC_HOST_GID": f"{os.getgid()}",
-"DOCKER_BUILDKIT": "0"
+"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
}
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
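With `config()` here behaving like a decouple-style lookup (environment first, then the supplied default; an assumption about the helper), BuildKit stays off by default but can now be enabled for a single build:

```bash
DOCKER_BUILDKIT=1 laconic-so build-containers --include cerc/go-ethereum
```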

@@ -2,6 +2,7 @@ version: '3.7'
services:
fixturenet-eth-bootnode-geth:
restart: always
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
@@ -15,6 +16,7 @@ services:
- "30303"
fixturenet-eth-geth-1:
restart: always
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
@@ -42,6 +44,7 @@ services:
- "6060"
fixturenet-eth-geth-2:
restart: always
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
@@ -60,12 +63,14 @@ services:
- fixturenet_eth_geth_2_data:/root/ethdata
fixturenet-eth-bootnode-lighthouse:
restart: always
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
fixturenet-eth-lighthouse-1:
restart: always
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@@ -91,6 +96,7 @@ services:
- "8001"
fixturenet-eth-lighthouse-2:
restart: always
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]

@@ -5,6 +5,7 @@ services:
# Creates / updates the configuration for L1 contracts deployment
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
fixturenet-optimism-contracts:
restart: on-failure
hostname: fixturenet-optimism-contracts
image: cerc/optimism-contracts:local
env_file:
@@ -35,6 +36,7 @@ services:
# Generates the config files required for L2 (outputs to volume l2_config)
op-node-l2-config-gen:
restart: on-failure
image: cerc/optimism-op-node:local
depends_on:
fixturenet-optimism-contracts:
@@ -54,6 +56,7 @@ services:
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
op-geth:
restart: always
image: cerc/optimism-l2geth:local
depends_on:
op-node-l2-config-gen:
@@ -76,6 +79,7 @@ services:
# Runs the L2 consensus client (Sequencer node)
op-node:
restart: always
image: cerc/optimism-op-node:local
depends_on:
op-geth:
@@ -103,6 +107,7 @@ services:
# Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
op-batcher:
restart: always
image: cerc/optimism-op-batcher:local
depends_on:
op-node:
@@ -129,6 +134,7 @@ services:
# Runs the proposer (periodically submits new state roots to L1)
op-proposer:
restart: always
image: cerc/optimism-op-proposer:local
depends_on:
op-node:

@@ -0,0 +1,129 @@
services:
fixturenet-eth-bootnode-geth:
restart: always
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-plugeth:local
volumes:
- fixturenet_plugeth_bootnode_geth_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
ports:
- "9898"
- "30303"
fixturenet-eth-geth-1:
restart: always
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-plugeth:local
volumes:
- fixturenet_plugeth_geth_1_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8545/"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
depends_on:
- fixturenet-eth-bootnode-geth
ports:
- "8545"
- "40000"
- "6060"
fixturenet-eth-geth-2:
restart: always
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8545/"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
environment:
CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-plugeth:local
depends_on:
- fixturenet-eth-bootnode-geth
volumes:
- fixturenet_plugeth_geth_2_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
fixturenet-eth-bootnode-lighthouse:
restart: always
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
fixturenet-eth-lighthouse-1:
restart: always
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "1"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_plugeth_lighthouse_1_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-1:
condition: service_healthy
ports:
- "8001"
fixturenet-eth-lighthouse-2:
restart: always
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "2"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-2:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_plugeth_lighthouse_2_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-2:
condition: service_healthy
volumes:
fixturenet_plugeth_bootnode_geth_data:
fixturenet_plugeth_geth_1_data:
fixturenet_plugeth_geth_2_data:
fixturenet_plugeth_lighthouse_1_data:
fixturenet_plugeth_lighthouse_2_data:

@@ -1,6 +1,7 @@
# Add-on pod to include foundry tooling within a fixturenet
services:
foundry:
restart: always
image: cerc/foundry:local
command: ["while :; do sleep 600; done"]
volumes:

@@ -0,0 +1 @@
See: https://docs.plugeth.org/

@@ -27,8 +27,8 @@ yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
# If it doesn't exist there will be no .data.dist.tarball element,
# and jq will output the string "null"
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
-if [[ $package_tarball == "null" ]]; then
-    echo "FATAL: Target package version ($versioned_target_package) not found" >&2
+if [[ "$yarn_info_output" == "" || $package_tarball == "null" ]]; then
+    echo "FATAL: Target package version ($versioned_target_package) not found (or bad npm auth token)" >&2
exit 1
fi
# Code below parses out the values we need
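The added empty-string guard matters because jq emits nothing at all on empty input, not the string "null", so a failed `yarn info` (for example, a bad npm auth token) slipped past the old test:

```bash
echo '{}' | jq -r .data.dist.tarball   # prints "null" (the old check catches this)
echo ''   | jq -r .data.dist.tarball   # prints nothing; $package_tarball becomes ""
```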

@@ -6,7 +6,7 @@ RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM cerc/go-ethereum:local as geth
-FROM alpine:latest
+FROM alpine:3.17
RUN apk add --no-cache python3 python3-dev py3-pip curl wget jq build-base gettext libintl openssl bash bind-tools postgresql-client
COPY --from=delve /go/bin/dlv /usr/local/bin/

@@ -13,22 +13,26 @@ DEBUG_LEVEL=${1:-info}
echo "Starting bootnode"
-if [ ! -f "$DATADIR/bootnode/enr.dat" ]; then
-    echo "Generating bootnode enr"
-    lcli \
-        generate-bootnode-enr \
-        --ip $ENR_IP \
-        --udp-port $BOOTNODE_PORT \
-        --tcp-port $BOOTNODE_PORT \
-        --genesis-fork-version $GENESIS_FORK_VERSION \
-        --output-dir $DATADIR/bootnode
-    bootnode_enr=`cat $DATADIR/bootnode/enr.dat`
-    echo "- $bootnode_enr" > $TESTNET_DIR/boot_enr.yaml
-    echo "Generated bootnode enr and written to $TESTNET_DIR/boot_enr.yaml"
+# Clean up existing ENR dir to avoid node connectivity issues on a restart
+if [ -d "$DATADIR/bootnode" ]; then
+    echo "Removing existing bootnode enr directory"
+    rm -r "$DATADIR/bootnode"
+fi
+echo "Generating bootnode enr"
+lcli \
+    generate-bootnode-enr \
+    --ip $ENR_IP \
+    --udp-port $BOOTNODE_PORT \
+    --tcp-port $BOOTNODE_PORT \
+    --genesis-fork-version $GENESIS_FORK_VERSION \
+    --output-dir $DATADIR/bootnode
+bootnode_enr=`cat $DATADIR/bootnode/enr.dat`
+echo "- $bootnode_enr" > $TESTNET_DIR/boot_enr.yaml
+echo "Generated bootnode enr and written to $TESTNET_DIR/boot_enr.yaml"
exec lighthouse boot_node \
--testnet-dir $TESTNET_DIR \
--port $BOOTNODE_PORT \

@@ -0,0 +1,27 @@
FROM skylenet/ethereum-genesis-generator@sha256:210353ce7c898686bc5092f16c61220a76d357f51eff9c451e9ad1b9ad03d4d3 AS ethgen
FROM golang:1.19.4-bullseye AS delve
RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM ubuntu:22.04
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 python3-dev python3-pip curl wget jq gettext gettext-base openssl bash dnsutils postgresql-client make iproute2 netcat && \
rm -rf /var/lib/apt/lists/*
COPY --from=delve /go/bin/dlv /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-testnet-genesis /usr/local/bin/
COPY --from=ethgen /usr/local/bin/eth2-val-tools /usr/local/bin/
COPY --from=ethgen /apps /apps
RUN wget -O /usr/local/bin/geth https://github.com/openrelayxyz/plugeth/releases/download/v1.11.6.1.0/geth-linux-amd64-v1.1.0-v1.11.6.1.0 && chmod a+x /usr/local/bin/geth
RUN cd /apps/el-gen && pip3 install -r requirements.txt
COPY genesis /opt/testnet
COPY run-el.sh /opt/testnet/run.sh
RUN cd /opt/testnet && make genesis-el
RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey
ENTRYPOINT ["/opt/testnet/run.sh"]

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# Build cerc/fixturenet-eth-plugeth
set -x
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
if [ ! -d "${SCRIPT_DIR}/genesis" ]; then
cp -frp ${SCRIPT_DIR}/../cerc-fixturenet-eth-geth/genesis ${SCRIPT_DIR}/genesis
fi
if [ ! -f "${SCRIPT_DIR}/run-el.sh" ]; then
cp -fp ${SCRIPT_DIR}/../cerc-fixturenet-eth-geth/run-el.sh ${SCRIPT_DIR}/
fi
docker build -t cerc/fixturenet-eth-plugeth:local -f ${SCRIPT_DIR}/Dockerfile ${build_command_args} $SCRIPT_DIR

@@ -1,35 +1,36 @@
-cerc-io/ipld-eth-db
-cerc-io/go-ethereum
-cerc-io/ipld-eth-server
-cerc-io/eth-statediff-service
-cerc-io/eth-statediff-fill-service
-cerc-io/ipld-eth-db-validator
-cerc-io/ipld-eth-beacon-indexer
-cerc-io/ipld-eth-beacon-db
-cerc-io/laconicd
-cerc-io/laconic-sdk
-cerc-io/laconic-registry-cli
-cerc-io/laconic-console
-cerc-io/mobymask-watcher
-cerc-io/watcher-ts
-cerc-io/mobymask-v2-watcher-ts
-cerc-io/MobyMask
-vulcanize/uniswap-watcher-ts
-vulcanize/uniswap-v3-info
-vulcanize/assemblyscript
-cerc-io/eth-probe
-cerc-io/tx-spammer
-dboreham/foundry
-lirewine/gem
-lirewine/debug
-lirewine/crypto
-lirewine/sdk
-telackey/act_runner
-ethereum-optimism/op-geth
-ethereum-optimism/optimism
-pokt-network/pocket-core
-pokt-network/pocket-core-deployments
-cerc-io/azimuth-watcher-ts
-cerc-io/ipld-eth-state-snapshot
-cerc-io/gelato-watcher-ts
-filecoin-project/lotus
+github.com/cerc-io/ipld-eth-db
+github.com/cerc-io/go-ethereum
+github.com/cerc-io/ipld-eth-server
+github.com/cerc-io/eth-statediff-service
+github.com/cerc-io/eth-statediff-fill-service
+github.com/cerc-io/ipld-eth-db-validator
+github.com/cerc-io/ipld-eth-beacon-indexer
+github.com/cerc-io/ipld-eth-beacon-db
+github.com/cerc-io/laconicd
+github.com/cerc-io/laconic-sdk
+github.com/cerc-io/laconic-registry-cli
+github.com/cerc-io/laconic-console
+github.com/cerc-io/mobymask-watcher
+github.com/cerc-io/watcher-ts
+github.com/cerc-io/mobymask-v2-watcher-ts
+github.com/cerc-io/MobyMask
+github.com/vulcanize/uniswap-watcher-ts
+github.com/vulcanize/uniswap-v3-info
+github.com/vulcanize/assemblyscript
+github.com/cerc-io/eth-probe
+github.com/cerc-io/tx-spammer
+github.com/dboreham/foundry
+github.com/lirewine/gem
+github.com/lirewine/debug
+github.com/lirewine/crypto
+github.com/lirewine/sdk
+github.com/telackey/act_runner
+github.com/ethereum-optimism/op-geth
+github.com/ethereum-optimism/optimism
+github.com/pokt-network/pocket-core
+github.com/pokt-network/pocket-core-deployments
+github.com/cerc-io/azimuth-watcher-ts
+github.com/cerc-io/ipld-eth-state-snapshot
+github.com/cerc-io/gelato-watcher-ts
+github.com/filecoin-project/lotus
+git.vdb.to/cerc-io/test-project
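Repository entries are now fully qualified as `host/org/repo`, optionally pinned with `@branch-or-tag`: for example `github.com/cerc-io/ipld-eth-db` or `git.vdb.to/cerc-io/test-project@test-branch`. Legacy bare `org/repo` names are still accepted and interpreted as github.com (see `host_and_path_for_repo` in the repo-setup diff further down).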

@@ -1,7 +1,7 @@
version: "1.0"
name: azimuth
repos:
-  - cerc-io/azimuth-watcher-ts
+  - github.com/cerc-io/azimuth-watcher-ts
containers:
- cerc/watcher-azimuth
pods:

@@ -2,10 +2,10 @@ version: "1.0"
name: chain-chunker
description: "Stack to build containers for chain-chunker"
repos:
-  - cerc-io/ipld-eth-state-snapshot
-  - cerc-io/eth-statediff-service
-  - cerc-io/ipld-eth-db
-  - cerc-io/ipld-eth-server
+  - github.com/cerc-io/ipld-eth-state-snapshot@v5
+  - github.com/cerc-io/eth-statediff-service@v5
+  - github.com/cerc-io/ipld-eth-db@v5
+  - github.com/cerc-io/ipld-eth-server@v5
containers:
- cerc/ipld-eth-state-snapshot
- cerc/eth-statediff-service

@@ -1,11 +1,11 @@
version: "1.0"
name: erc20-watcher
repos:
-  - cerc-io/go-ethereum
-  - cerc-io/ipld-eth-db
-  - cerc-io/ipld-eth-server
-  - cerc-io/watcher-ts
-  - dboreham/foundry
+  - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/ipld-eth-db
+  - github.com/cerc-io/ipld-eth-server
+  - github.com/cerc-io/watcher-ts
+  - github.com/dboreham/foundry
containers:
- cerc/foundry
- cerc/go-ethereum

@@ -1,10 +1,10 @@
version: "1.0"
name: erc721-watcher
repos:
-  - cerc-io/go-ethereum
-  - cerc-io/ipld-eth-db
-  - cerc-io/ipld-eth-server
-  - cerc-io/watcher-ts
+  - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/ipld-eth-db
+  - github.com/cerc-io/ipld-eth-server
+  - github.com/cerc-io/watcher-ts
containers:
- cerc/go-ethereum
- cerc/go-ethereum-foundry

@@ -2,11 +2,11 @@ version: "1.0"
name: fixturenet-eth-loaded
description: "Loaded Ethereum Fixturenet"
repos:
-  - cerc-io/go-ethereum
-  - cerc-io/tx-spammer
-  - cerc-io/ipld-eth-server
-  - cerc-io/ipld-eth-db
-  - cerc/go-ethereum
+  - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/tx-spammer
+  - github.com/cerc-io/ipld-eth-server
+  - github.com/cerc-io/ipld-eth-db
+  - github.com/cerc-io/go-ethereum
containers:
- cerc/lighthouse
- cerc/fixturenet-eth-geth

@@ -2,8 +2,8 @@ version: "1.2"
name: fixturenet-eth-tx
description: "Ethereum Fixturenet w/ tx-spammer"
repos:
-  - cerc-io/go-ethereum
-  - cerc-io/tx-spammer
+  - github.com/cerc-io/go-ethereum
+  - github.com/cerc-io/tx-spammer
- dboreham/foundry
containers:
- cerc/go-ethereum

@@ -66,7 +66,7 @@ It is not necessary to use them all at once, but a complete example follows:
```
# Setup
-$ laconic-so setup-repositories --include cerc-io/go-ethereum,cerc-io/ipld-eth-db,cerc-io/ipld-eth-server,cerc-io/ipld-eth-beacon-db,cerc-io/ipld-eth-beacon-indexer,cerc-io/eth-probe,cerc-io/tx-spammer
+$ laconic-so setup-repositories --include github.com/cerc-io/go-ethereum,github.com/cerc-io/ipld-eth-db,github.com/cerc-io/ipld-eth-server,github.com/cerc-io/ipld-eth-beacon-db,github.com/cerc-io/ipld-eth-beacon-indexer,github.com/cerc-io/eth-probe,github.com/cerc-io/tx-spammer
# Build
$ laconic-so build-containers --include cerc/go-ethereum,cerc/lighthouse,cerc/fixturenet-eth-geth,cerc/fixturenet-eth-lighthouse,cerc/ipld-eth-db,cerc/ipld-eth-server,cerc/ipld-eth-beacon-db,cerc/ipld-eth-beacon-indexer,cerc/eth-probe,cerc/keycloak,cerc/tx-spammer

@@ -2,8 +2,8 @@ version: "1.1"
name: fixturenet-eth
description: "Ethereum Fixturenet"
repos:
-  - cerc-io/go-ethereum
-  - dboreham/foundry
+  - github.com/cerc-io/go-ethereum
+  - github.com/dboreham/foundry
containers:
- cerc/go-ethereum
- cerc/lighthouse

@@ -2,14 +2,14 @@ version: "1.1"
name: fixturenet-laconic-loaded
description: "A full featured laconic fixturenet"
repos:
-  - cerc-io/laconicd
-  - lirewine/debug
-  - lirewine/crypto
-  - lirewine/gem
-  - lirewine/sdk
-  - cerc-io/laconic-sdk
-  - cerc-io/laconic-registry-cli
-  - cerc-io/laconic-console
+  - github.com/cerc-io/laconicd
+  - github.com/lirewine/debug
+  - github.com/lirewine/crypto
+  - github.com/lirewine/gem
+  - github.com/lirewine/sdk
+  - github.com/cerc-io/laconic-sdk
+  - github.com/cerc-io/laconic-registry-cli
+  - github.com/cerc-io/laconic-console
npms:
- laconic-sdk
- laconic-registry-cli

@@ -2,9 +2,9 @@ version: "1.0"
name: fixturenet-laconicd
description: "A laconicd fixturenet"
repos:
-  - cerc-io/laconicd
-  - cerc-io/laconic-sdk
-  - cerc-io/laconic-registry-cli
+  - github.com/cerc-io/laconicd
+  - github.com/cerc-io/laconic-sdk
+  - github.com/cerc-io/laconic-registry-cli
npms:
- laconic-sdk
- laconic-registry-cli

@@ -2,7 +2,7 @@ version: "1.0"
name: fixturenet-lotus
description: "A lotus fixturenet"
repos:
-  - filecoin-project/lotus
+  - github.com/filecoin-project/lotus
containers:
- cerc/lotus
pods:

@@ -9,7 +9,7 @@ Prerequisite: An L1 Ethereum RPC endpoint
Clone required repositories:
```bash
-laconic-so --stack fixturenet-optimism setup-repositories --exclude cerc-io/go-ethereum
+laconic-so --stack fixturenet-optimism setup-repositories --exclude github.com/cerc-io/go-ethereum
# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
```

@@ -2,10 +2,10 @@ version: "1.0"
name: fixturenet-optimism
description: "Optimism Fixturenet"
repos:
-  - cerc-io/go-ethereum
-  - dboreham/foundry
-  - ethereum-optimism/optimism
-  - ethereum-optimism/op-geth
+  - github.com/cerc-io/go-ethereum
+  - github.com/dboreham/foundry
+  - github.com/ethereum-optimism/optimism
+  - github.com/ethereum-optimism/op-geth
containers:
- cerc/go-ethereum
- cerc/lighthouse

@@ -0,0 +1,19 @@
# fixturenet-plugeth-tx
A variation of `fixturenet-eth` that uses `plugeth` instead of `go-ethereum`.
See `stacks/fixturenet-eth/README.md` for more information.
## Containers
* cerc/lighthouse
* cerc/fixturenet-eth-plugeth
* cerc/fixturenet-eth-lighthouse
* cerc/tx-spammer
## Deploy the stack
```
$ laconic-so --stack fixturenet-plugeth-tx setup-repositories
$ laconic-so --stack fixturenet-plugeth-tx build-containers
$ laconic-so --stack fixturenet-plugeth-tx deploy up
```

@@ -0,0 +1,13 @@
version: "1.2"
name: fixturenet-plugeth-tx
description: "plugeth Ethereum Fixturenet w/ tx-spammer"
repos:
- github.com/cerc-io/tx-spammer
containers:
- cerc/lighthouse
- cerc/fixturenet-eth-plugeth
- cerc/fixturenet-eth-lighthouse
- cerc/tx-spammer
pods:
- fixturenet-plugeth
- tx-spammer

@@ -2,9 +2,9 @@ version: "1.0"
name: fixturenet-pocket
description: "A single node pocket chain that can serve relays from the geth-1 node in eth-fixturenet"
repos:
-  - cerc-io/go-ethereum
-  - pokt-network/pocket-core
-  - pokt-network/pocket-core-deployments # contains the dockerfile
+  - github.com/cerc-io/go-ethereum
+  - github.com/pokt-network/pocket-core
+  - github.com/pokt-network/pocket-core-deployments # contains the dockerfile
containers:
- cerc/go-ethereum
- cerc/lighthouse

@@ -1,7 +1,7 @@
version: "1.0"
name: gelato
repos:
-  - cerc-io/gelato-watcher-ts
+  - github.com/cerc-io/gelato-watcher-ts
containers:
- cerc/watcher-gelato
pods:

@@ -9,7 +9,7 @@ Prerequisite: L2 Optimism Geth and Node RPC endpoints
Clone required repositories:
```bash
-laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/mobymask-v2-watcher-ts
+laconic-so --stack mobymask-v2 setup-repositories --include github.com/cerc-io/MobyMask,github.com/cerc-io/watcher-ts,github.com/cerc-io/mobymask-v2-watcher-ts
# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned below and re-run the command
```

@@ -1,13 +1,13 @@
version: "1.0"
name: mobymask-v2
repos:
-  - cerc-io/go-ethereum
-  - dboreham/foundry
-  - ethereum-optimism/optimism
-  - ethereum-optimism/op-geth
-  - cerc-io/watcher-ts
-  - cerc-io/mobymask-v2-watcher-ts
-  - cerc-io/MobyMask
+  - github.com/cerc-io/go-ethereum
+  - github.com/dboreham/foundry
+  - github.com/ethereum-optimism/optimism
+  - github.com/ethereum-optimism/op-geth
+  - github.com/cerc-io/watcher-ts
+  - github.com/cerc-io/mobymask-v2-watcher-ts
+  - github.com/cerc-io/MobyMask
containers:
- cerc/go-ethereum
- cerc/lighthouse

@@ -14,7 +14,7 @@ This demo has been tested on a `Ubuntu 22.04 LTS` machine with `8GB` of RAM
Clone required repositories:
```bash
-laconic-so --stack mobymask-v2 setup-repositories --include cerc-io/MobyMask,cerc-io/watcher-ts,cerc-io/mobymask-v2-watcher-ts
+laconic-so --stack mobymask-v2 setup-repositories --include github.com/cerc-io/MobyMask,github.com/cerc-io/watcher-ts,github.com/cerc-io/mobymask-v2-watcher-ts
# This will clone the required repositories at ~/cerc
# If this throws an error as a result of being already checked out to a branch/tag in a repo, remove the repositories mentioned in the next step and re-run the command

@@ -11,7 +11,7 @@ This deployment expects that ipld-eth-server's endpoints are available on the lo
## Clone required repositories
```
-$ laconic-so setup-repositories --include cerc-io/watcher-ts
+$ laconic-so setup-repositories --include github.com/cerc-io/watcher-ts
```
## Build the watcher container

@@ -1,7 +1,7 @@
version: "1.0"
name: mobymask-watcher
repos:
-  - cerc-io/watcher-ts/v0.2.19
+  - github.com/cerc-io/watcher-ts/v0.2.19
containers:
- cerc/watcher-mobymask
pods:

@@ -2,8 +2,8 @@ version: "1.1"
name: package-registry
description: "Local Package Registry"
repos:
-  - cerc-io/hosting
-  - telackey/act_runner
+  - github.com/cerc-io/hosting
+  - gitea.com/gitea/act_runner
containers:
- cerc/act-runner
- cerc/act-runner-task-executor

@@ -2,7 +2,8 @@ version: "1.0"
name: test
description: "A test stack"
repos:
-  - cerc-io/laconicd
+  - github.com/cerc-io/laconicd
+  - git.vdb.to/cerc-io/test-project@test-branch
containers:
- cerc/test-container
pods:

@@ -1,8 +1,8 @@
version: "1.0"
name: uniswap-v3
repos:
-  - vulcanize/uniswap-watcher-ts
-  - vulcanize/uniswap-v3-info
+  - github.com/vulcanize/uniswap-watcher-ts
+  - github.com/vulcanize/uniswap-v3-info
containers:
- cerc/watcher-uniswap-v3
- cerc/uniswap-v3-info

@@ -28,108 +28,146 @@ import importlib.resources
from pathlib import Path
from .util import include_exclude_check, get_parsed_stack_config
+class DeployCommandContext(object):
+    def __init__(self, cluster_context, docker):
+        self.cluster_context = cluster_context
+        self.docker = docker
-@click.command()
+@click.group()
@click.option("--include", help="only start these components")
@click.option("--exclude", help="don\'t start these components")
@click.option("--env-file", help="env file to be used")
@click.option("--cluster", help="specify a non-default cluster name")
-@click.argument('command', required=True) # help: command: up|down|ps
-@click.argument('extra_args', nargs=-1) # help: command: up|down|ps <service1> <service2>
@click.pass_context
-def command(ctx, include, exclude, env_file, cluster, command, extra_args):
+def command(ctx, include, exclude, env_file, cluster):
'''deploy a stack'''
# TODO: implement option exclusion and command value constraint lost with the move from argparse to click
debug = ctx.obj.debug
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
local_stack = ctx.obj.local_stack
dry_run = ctx.obj.dry_run
stack = ctx.obj.stack
-cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster)
+cluster_context = _make_cluster_context(ctx.obj, include, exclude, cluster, env_file)
# See: https://gabrieldemarmiesse.github.io/python-on-whales/sub-commands/compose/
-docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster, compose_env_file=env_file)
+docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster,
+                      compose_env_file=cluster_context.env_file)
+ctx.obj = DeployCommandContext(cluster_context, docker)
+# Subcommand is executed now, by the magic of click
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: up <service1> <service2>
@click.pass_context
def up(ctx, extra_args):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
cluster_context = ctx.obj.cluster_context
container_exec_env = _make_runtime_env(global_context)
for attr, value in container_exec_env.items():
os.environ[attr] = value
if global_context.verbose:
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
for pre_start_command in cluster_context.pre_start_commands:
_run_command(global_context, cluster_context.cluster, pre_start_command)
ctx.obj.docker.compose.up(detach=True, services=extra_args_list)
for post_start_command in cluster_context.post_start_commands:
_run_command(global_context, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(global_context, cluster_context.config, ctx.obj.docker, container_exec_env)
if not dry_run:
if command == "up":
container_exec_env = _make_runtime_env(ctx.obj)
for attr, value in container_exec_env.items():
os.environ[attr] = value
if verbose:
print(f"Running compose up with container_exec_env: {container_exec_env}, extra_args: {extra_args_list}")
for pre_start_command in cluster_context.pre_start_commands:
_run_command(ctx.obj, cluster_context.cluster, pre_start_command)
docker.compose.up(detach=True, services=extra_args_list)
for post_start_command in cluster_context.post_start_commands:
_run_command(ctx.obj, cluster_context.cluster, post_start_command)
_orchestrate_cluster_config(ctx.obj, cluster_context.config, docker, container_exec_env)
@command.command()
@click.option("--delete-volumes/--preserve-volumes", default=False, help="delete data volumes")
@click.argument('extra_args', nargs=-1) # help: command: down<service1> <service2>
@click.pass_context
def down(ctx, delete_volumes, extra_args):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if global_context.verbose:
print("Running compose down")
timeout_arg = None
if extra_args_list:
timeout_arg = extra_args_list[0]
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
ctx.obj.docker.compose.down(timeout=timeout_arg, volumes=delete_volumes)
elif command == "down":
if verbose:
print("Running compose down")
timeout_arg = None
if extra_args_list:
timeout_arg=extra_args_list[0]
@command.command()
@click.pass_context
def ps(ctx):
global_context = ctx.parent.parent.obj
if not global_context.dry_run:
if global_context.verbose:
print("Running compose ps")
container_list = ctx.obj.docker.compose.ps()
if len(container_list) > 0:
print("Running containers:")
for container in container_list:
print(f"id: {container.id}, name: {container.name}, ports: ", end="")
ports = container.network_settings.ports
comma = ""
for port_mapping in ports.keys():
mapping = ports[port_mapping]
print(comma, end="")
if mapping is None:
print(f"{port_mapping}", end="")
else:
print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
comma = ", "
print()
else:
print("No containers running")
# Specify shutdown timeout (default 10s) to give services enough time to shutdown gracefully
docker.compose.down(timeout=timeout_arg)
elif command == "exec":
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: exec <service> <cmd>")
sys.exit(1)
service_name = extra_args_list[0]
command_to_exec = ["sh", "-c"] + extra_args_list[1:]
container_exec_env = _make_runtime_env(ctx.obj)
if verbose:
print(f"Running compose exec {service_name} {command_to_exec}")
try:
docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
except DockerException as error:
print(f"container command returned error exit status")
elif command == "port":
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: port <service> <exposed-port>")
sys.exit(1)
service_name = extra_args_list[0]
exposed_port = extra_args_list[1]
if verbose:
print(f"Running compose port {service_name} {exposed_port}")
mapped_port_data = docker.compose.port(service_name, exposed_port)
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
elif command == "ps":
if verbose:
print("Running compose ps")
container_list = docker.compose.ps()
if len(container_list) > 0:
print("Running containers:")
for container in container_list:
print(f"id: {container.id}, name: {container.name}, ports: ", end="")
ports = container.network_settings.ports
comma = ""
for port_mapping in ports.keys():
mapping = ports[port_mapping]
print(comma, end="")
if mapping is None:
print(f"{port_mapping}", end="")
else:
print(f"{mapping[0]['HostIp']}:{mapping[0]['HostPort']}->{port_mapping}", end="")
comma = ", "
print()
else:
print("No containers running")
elif command == "logs":
if verbose:
print("Running compose logs")
logs_output = docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
print(logs_output)
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: port <service1> <service2>
@click.pass_context
def port(ctx, extra_args):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: port <service> <exposed-port>")
sys.exit(1)
service_name = extra_args_list[0]
exposed_port = extra_args_list[1]
if global_context.verbose:
print(f"Running compose port {service_name} {exposed_port}")
mapped_port_data = ctx.obj.docker.compose.port(service_name, exposed_port)
print(f"{mapped_port_data[0]}:{mapped_port_data[1]}")
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: exec <service> <command>
@click.pass_context
def exec(ctx, extra_args):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if extra_args_list is None or len(extra_args_list) < 2:
print("Usage: exec <service> <cmd>")
sys.exit(1)
service_name = extra_args_list[0]
command_to_exec = ["sh", "-c"] + extra_args_list[1:]
container_exec_env = _make_runtime_env(global_context)
if global_context.verbose:
print(f"Running compose exec {service_name} {command_to_exec}")
try:
ctx.obj.docker.compose.execute(service_name, command_to_exec, envs=container_exec_env)
except DockerException as error:
print(f"container command returned error exit status")
@command.command()
@click.argument('extra_args', nargs=-1) # help: command: logs <service1> <service2>
@click.pass_context
def logs(ctx, extra_args):
global_context = ctx.parent.parent.obj
extra_args_list = list(extra_args) or None
if not global_context.dry_run:
if global_context.verbose:
print("Running compose logs")
logs_output = ctx.obj.docker.compose.logs(services=extra_args_list if extra_args_list is not None else [])
print(logs_output)
def get_stack_status(ctx, stack):
@@ -137,7 +175,7 @@ def get_stack_status(ctx, stack):
ctx_copy = copy.copy(ctx)
ctx_copy.stack = stack
-cluster_context = _make_cluster_context(ctx_copy, None, None, None)
+cluster_context = _make_cluster_context(ctx_copy, None, None, None, None)
docker = DockerClient(compose_files=cluster_context.compose_files, compose_project_name=cluster_context.cluster)
# TODO: refactor to avoid duplicating this code above
if ctx.verbose:
@@ -162,7 +200,7 @@ def _make_runtime_env(ctx):
return container_exec_env
-def _make_cluster_context(ctx, include, exclude, cluster):
+def _make_cluster_context(ctx, include, exclude, cluster, env_file):
if ctx.local_stack:
dev_root_path = os.getcwd()[0:os.getcwd().rindex("stack-orchestrator")]
@@ -235,16 +273,17 @@ def _make_cluster_context(ctx, include, exclude, cluster):
if ctx.verbose:
print(f"files: {compose_files}")
-return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config)
+return cluster_context(cluster, compose_files, pre_start_commands, post_start_commands, cluster_config, env_file)
class cluster_context:
-def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config) -> None:
+def __init__(self, cluster, compose_files, pre_start_commands, post_start_commands, config, env_file) -> None:
self.cluster = cluster
self.compose_files = compose_files
self.pre_start_commands = pre_start_commands
self.post_start_commands = post_start_commands
self.config = config
self.env_file = env_file
def _convert_to_new_format(old_pod_array):
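Net effect of this refactor: `deploy` changes from a single command taking a positional `command` argument into a click group whose subcommands (`up`, `down`, `ps`, `port`, `exec`, `logs`) each declare their own arguments, and the env file is carried once in the cluster context instead of being re-passed. The invocation shape is unchanged:

```bash
laconic-so --stack test deploy up
laconic-so --stack test deploy port test 80
laconic-so --stack test deploy down --delete-volumes
```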

@@ -52,15 +52,111 @@ def is_git_repo(path):
# )
def branch_strip(s):
return s.split('@')[0]
def host_and_path_for_repo(fully_qualified_repo):
repo_branch_split = fully_qualified_repo.split("@")
repo_branch = repo_branch_split[-1] if len(repo_branch_split) > 1 else None
repo_host_split = repo_branch_split[0].split("/")
# Legacy unqualified repo means github
if len(repo_host_split) == 2:
return "github.com", "/".join(repo_host_split), repo_branch
else:
if len(repo_host_split) == 3:
# First part is the host
return repo_host_split[0], "/".join(repo_host_split[1:]), repo_branch
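# Illustrative results of the parsing above (hypothetical calls):
#   host_and_path_for_repo("cerc-io/laconicd")                            -> ("github.com", "cerc-io/laconicd", None)
#   host_and_path_for_repo("github.com/cerc-io/ipld-eth-db@v5")           -> ("github.com", "cerc-io/ipld-eth-db", "v5")
#   host_and_path_for_repo("git.vdb.to/cerc-io/test-project@test-branch") -> ("git.vdb.to", "cerc-io/test-project", "test-branch")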
# TODO: fix the messy arg list here
def process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, fully_qualified_repo):
repo_host, repo_path, repo_branch = host_and_path_for_repo(fully_qualified_repo)
git_ssh_prefix = f"git@{repo_host}:"
git_http_prefix = f"https://{repo_host}/"
full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo_path}"
repoName = repo_path.split("/")[-1]
full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
is_present = os.path.isdir(full_filesystem_repo_path)
current_repo_branch = git.Repo(full_filesystem_repo_path).active_branch.name if is_present else None
if not quiet:
present_text = f"already exists active branch: {current_repo_branch}" if is_present \
else 'Needs to be fetched'
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
# Quick check that it's actually a repo
if is_present:
if not is_git_repo(full_filesystem_repo_path):
print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
sys.exit(1)
else:
if pull:
if verbose:
print(f"Running git pull for {full_filesystem_repo_path}")
if not check_only:
git_repo = git.Repo(full_filesystem_repo_path)
origin = git_repo.remotes.origin
origin.pull(progress=None if quiet else GitProgress())
else:
print("(git pull skipped)")
if not is_present:
# Clone
if verbose:
print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
if not dry_run:
git.Repo.clone_from(full_github_repo_path,
full_filesystem_repo_path,
progress=None if quiet else GitProgress())
else:
print("(git clone skipped)")
# Checkout the requested branch, if one was specified
branch_to_checkout = None
if branches_array:
# Find the current repo in the branches list
print("Checking")
for repo_branch in branches_array:
repo_branch_tuple = repo_branch.split(" ")
if repo_branch_tuple[0] == branch_strip(fully_qualified_repo):
# checkout specified branch
branch_to_checkout = repo_branch_tuple[1]
else:
branch_to_checkout = repo_branch
if branch_to_checkout:
if current_repo_branch is None or (current_repo_branch and (current_repo_branch != branch_to_checkout)):
if not quiet:
print(f"switching to branch {branch_to_checkout} in repo {repo_path}")
git_repo = git.Repo(full_filesystem_repo_path)
git_repo.git.checkout(branch_to_checkout)
else:
if verbose:
print(f"repo {repo_path} is already switched to branch {branch_to_checkout}")
def parse_branches(branches_string):
if branches_string:
result_array = []
branches_directives = branches_string.split(",")
for branch_directive in branches_directives:
split_directive = branch_directive.split("@")
if len(split_directive) != 2:
print(f"Error: branch specified is not valid: {branch_directive}")
sys.exit(1)
result_array.append(f"{split_directive[0]} {split_directive[1]}")
return result_array
else:
return None
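# For example (hypothetical input): parse_branches("cerc-io/laconicd@main,cerc-io/laconic-sdk@dev")
# returns ["cerc-io/laconicd main", "cerc-io/laconic-sdk dev"], the same "repo branch"
# shape the --branches-file path produces, so the checkout logic below treats both alike.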
@click.command()
@click.option("--include", help="only clone these repositories")
@click.option("--exclude", help="don\'t clone these repositories")
@click.option('--git-ssh', is_flag=True, default=False)
@click.option('--check-only', is_flag=True, default=False)
@click.option('--pull', is_flag=True, default=False)
@click.option("--branches", help="override branches for repositories")
@click.option('--branches-file', help="checkout branches specified in this file")
@click.pass_context
-def command(ctx, include, exclude, git_ssh, check_only, pull, branches_file):
+def command(ctx, include, exclude, git_ssh, check_only, pull, branches, branches_file):
'''git clone the set of repositories required to build the complete system from source'''
quiet = ctx.obj.quiet
@@ -68,16 +164,29 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches_file):
dry_run = ctx.obj.dry_run
stack = ctx.obj.stack
-branches = []
+branches_array = []
# TODO: branches file needs to be re-worked in the context of stacks
if branches_file:
if verbose:
print(f"loading branches from: {branches_file}")
with open(branches_file) as branches_file_open:
branches = branches_file_open.read().splitlines()
if verbose:
print(f"Branches are: {branches}")
if branches:
print("Error: can't specify both --branches and --branches-file")
sys.exit(1)
else:
if verbose:
print(f"loading branches from: {branches_file}")
with open(branches_file) as branches_file_open:
branches_array = branches_file_open.read().splitlines()
print(f"branches: {branches}")
if branches:
if branches_file:
print("Error: can't specify both --branches and --branches-file")
sys.exit(1)
else:
branches_array = parse_branches(branches)
if branches_array and verbose:
print(f"Branches are: {branches_array}")
local_stack = ctx.obj.local_stack
@@ -119,64 +228,15 @@ def command(ctx, include, exclude, git_ssh, check_only, pull, branches_file):
repos = []
for repo in repos_in_scope:
-if include_exclude_check(repo, include, exclude):
+if include_exclude_check(branch_strip(repo), include, exclude):
repos.append(repo)
else:
if verbose:
print(f"Excluding: {repo}")
def process_repo(repo):
git_ssh_prefix = "git@github.com:"
git_http_prefix = "https://github.com/"
full_github_repo_path = f"{git_ssh_prefix if git_ssh else git_http_prefix}{repo}"
repoName = repo.split("/")[-1]
full_filesystem_repo_path = os.path.join(dev_root_path, repoName)
is_present = os.path.isdir(full_filesystem_repo_path)
if not quiet:
present_text = f"already exists active branch: {git.Repo(full_filesystem_repo_path).active_branch}" if is_present \
else 'Needs to be fetched'
print(f"Checking: {full_filesystem_repo_path}: {present_text}")
# Quick check that it's actually a repo
if is_present:
if not is_git_repo(full_filesystem_repo_path):
print(f"Error: {full_filesystem_repo_path} does not contain a valid git repository")
sys.exit(1)
else:
if pull:
if verbose:
print(f"Running git pull for {full_filesystem_repo_path}")
if not check_only:
git_repo = git.Repo(full_filesystem_repo_path)
origin = git_repo.remotes.origin
origin.pull(progress=None if quiet else GitProgress())
else:
print("(git pull skipped)")
if not is_present:
# Clone
if verbose:
print(f'Running git clone for {full_github_repo_path} into {full_filesystem_repo_path}')
if not dry_run:
git.Repo.clone_from(full_github_repo_path,
full_filesystem_repo_path,
progress=None if quiet else GitProgress())
else:
print("(git clone skipped)")
# Checkout the requested branch, if one was specified
if branches:
# Find the current repo in the branches list
for repo_branch in branches:
repo_branch_tuple = repo_branch.split(" ")
if repo_branch_tuple[0] == repo:
# checkout specified branch
branch_to_checkout = repo_branch_tuple[1]
if verbose:
print(f"checking out branch {branch_to_checkout} in repo {repo}")
git_repo = git.Repo(full_filesystem_repo_path)
git_repo.git.checkout(branch_to_checkout)
for repo in repos:
try:
-process_repo(repo)
+process_repo(verbose, quiet, dry_run, pull, check_only, git_ssh, dev_root_path, branches_array, repo)
except git.exc.GitCommandError as error:
print(f"\n******* git command returned error exit status:\n{error}")
sys.exit(1)

cli.py
@@ -14,6 +14,7 @@
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
import click
from dataclasses import dataclass
from app import setup_repositories
from app import build_containers
@@ -24,17 +25,15 @@ from app import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# TODO: this seems kind of weird and heavy on boilerplate -- check it is
# the best Python can do for us.
-class Options(object):
-    def __init__(self, stack, quiet, verbose, dry_run, local_stack, debug, continue_on_error):
-        self.stack = stack
-        self.quiet = quiet
-        self.verbose = verbose
-        self.dry_run = dry_run
-        self.local_stack = local_stack
-        self.debug = debug
-        self.continue_on_error = continue_on_error
+@dataclass
+class Options:
+    stack: str
+    quiet: bool = False
+    verbose: bool = False
+    dry_run: bool = False
+    local_stack: bool = False
+    debug: bool = False
+    continue_on_error: bool = False
@click.group(context_settings=CONTEXT_SETTINGS)
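# Note: construction keeps the same shape via the generated __init__, e.g.
# Options(stack="test", verbose=True), with unspecified flags keeping their False defaults.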

tests/deploy/run-deploy-test.sh (executable)
@@ -0,0 +1,56 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Dump environment variables for debugging
echo "Environment variables:"
env
# Test basic stack-orchestrator deploy
echo "Running stack-orchestrator deploy test"
# Bit of a hack, test the most recent package
TEST_TARGET_SO=$( ls -t1 ./package/laconic-so* | head -1 )
# Set a non-default repo dir
export CERC_REPO_BASE_DIR=~/stack-orchestrator-test/repo-base-dir
echo "Testing this package: $TEST_TARGET_SO"
echo "Test version command"
reported_version_string=$( $TEST_TARGET_SO version )
echo "Version reported is: ${reported_version_string}"
echo "Cloning repositories into: $CERC_REPO_BASE_DIR"
rm -rf $CERC_REPO_BASE_DIR
mkdir -p $CERC_REPO_BASE_DIR
# Test bringing the test container up and down
# with and without volume removal
$TEST_TARGET_SO --stack test setup-repositories
$TEST_TARGET_SO --stack test build-containers
$TEST_TARGET_SO --stack test deploy up
# Test deploy port command
deploy_port_output=$( $TEST_TARGET_SO --stack test deploy port test 80 )
if [[ "$deploy_port_output" =~ ^0.0.0.0:[1-9][0-9]* ]]; then
echo "Deploy port test: passed"
else
echo "Deploy port test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down
# The next time we bring the container up the volume will be old (from the previous run above)
$TEST_TARGET_SO --stack test deploy up
log_output_1=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_1" == *"Filesystem is old"* ]]; then
echo "Retain volumes test: passed"
else
echo "Retain volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
# Now when we bring the container up the volume will be new again
$TEST_TARGET_SO --stack test deploy up
log_output_2=$( $TEST_TARGET_SO --stack test deploy logs )
if [[ "$log_output_2" == *"Filesystem is fresh"* ]]; then
echo "Delete volumes test: passed"
else
echo "Delete volumes test: FAILED"
exit 1
fi
$TEST_TARGET_SO --stack test deploy down --delete-volumes
echo "Test passed"
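To reproduce the CI suite locally, the workflow steps above reduce to the following (assuming Python 3.8+ and `shiv` on the PATH):

```bash
./scripts/create_build_tag_file.sh
./scripts/build_shiv_package.sh     # produces a laconic-so package under ./package
./tests/deploy/run-deploy-test.sh   # tests the newest ./package/laconic-so*
```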